v4.10.11
   1/*
   2 * NETLINK      Kernel-user communication protocol.
   3 *
   4 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
   6 * 				Patrick McHardy <kaber@trash.net>
   7 *
   8 *		This program is free software; you can redistribute it and/or
   9 *		modify it under the terms of the GNU General Public License
  10 *		as published by the Free Software Foundation; either version
  11 *		2 of the License, or (at your option) any later version.
  12 *
  13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
  14 *                               added netlink_proto_exit
  15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
  16 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
  17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
  18 * 				 - inc module use count of module that owns
  19 * 				   the kernel socket in case userspace opens
  20 * 				   socket of same protocol
  21 * 				 - remove all module support, since netlink is
  22 * 				   mandatory if CONFIG_NET=y these days
  23 */
  24
  25#include <linux/module.h>
  26
  27#include <linux/capability.h>
  28#include <linux/kernel.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/sched.h>
  32#include <linux/errno.h>
  33#include <linux/string.h>
  34#include <linux/stat.h>
  35#include <linux/socket.h>
  36#include <linux/un.h>
  37#include <linux/fcntl.h>
  38#include <linux/termios.h>
  39#include <linux/sockios.h>
  40#include <linux/net.h>
  41#include <linux/fs.h>
  42#include <linux/slab.h>
  43#include <linux/uaccess.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/rtnetlink.h>
  47#include <linux/proc_fs.h>
  48#include <linux/seq_file.h>
  49#include <linux/notifier.h>
  50#include <linux/security.h>
  51#include <linux/jhash.h>
  52#include <linux/jiffies.h>
  53#include <linux/random.h>
  54#include <linux/bitops.h>
  55#include <linux/mm.h>
  56#include <linux/types.h>
  57#include <linux/audit.h>
  58#include <linux/mutex.h>
  59#include <linux/vmalloc.h>
  60#include <linux/if_arp.h>
  61#include <linux/rhashtable.h>
  62#include <asm/cacheflush.h>
  63#include <linux/hash.h>
  64#include <linux/genetlink.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/sock.h>
  68#include <net/scm.h>
  69#include <net/netlink.h>
  70
  71#include "af_netlink.h"
  72
  73struct listeners {
  74	struct rcu_head		rcu;
  75	unsigned long		masks[0];
  76};
  77
  78/* state bits */
  79#define NETLINK_S_CONGESTED		0x0
  80
  81/* flags */
  82#define NETLINK_F_KERNEL_SOCKET		0x1
  83#define NETLINK_F_RECV_PKTINFO		0x2
  84#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
  85#define NETLINK_F_RECV_NO_ENOBUFS	0x8
  86#define NETLINK_F_LISTEN_ALL_NSID	0x10
  87#define NETLINK_F_CAP_ACK		0x20
  88
  89static inline int netlink_is_kernel(struct sock *sk)
  90{
  91	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
  92}
  93
  94struct netlink_table *nl_table __read_mostly;
  95EXPORT_SYMBOL_GPL(nl_table);
  96
  97static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
  98
  99static int netlink_dump(struct sock *sk);
 100static void netlink_skb_destructor(struct sk_buff *skb);
 101
 102/* nl_table locking explained:
 103 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 104 * and removal are protected with per bucket lock while using RCU list
 105 * modification primitives and may run in parallel to RCU protected lookups.
 106 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 107 * been acquired - either during or after the socket has been removed from
 108 * the list and after an RCU grace period.
 109 */
 110DEFINE_RWLOCK(nl_table_lock);
 111EXPORT_SYMBOL_GPL(nl_table_lock);
 112static atomic_t nl_table_users = ATOMIC_INIT(0);
 113
 114#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
 115
 116static BLOCKING_NOTIFIER_HEAD(netlink_chain);
 117
 118static DEFINE_SPINLOCK(netlink_tap_lock);
 119static struct list_head netlink_tap_all __read_mostly;
 120
 121static const struct rhashtable_params netlink_rhashtable_params;
 122
 123static inline u32 netlink_group_mask(u32 group)
 124{
 125	return group ? 1 << (group - 1) : 0;
 126}
 127
 128static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
 129					   gfp_t gfp_mask)
 130{
 131	unsigned int len = skb_end_offset(skb);
 132	struct sk_buff *new;
 133
 134	new = alloc_skb(len, gfp_mask);
 135	if (new == NULL)
 136		return NULL;
 137
 138	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
 139	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
 140	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
 141
 142	memcpy(skb_put(new, len), skb->data, len);
 143	return new;
 144}
 145
 146int netlink_add_tap(struct netlink_tap *nt)
 147{
 148	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
 149		return -EINVAL;
 150
 151	spin_lock(&netlink_tap_lock);
 152	list_add_rcu(&nt->list, &netlink_tap_all);
 153	spin_unlock(&netlink_tap_lock);
 154
 155	__module_get(nt->module);
 156
 157	return 0;
 158}
 159EXPORT_SYMBOL_GPL(netlink_add_tap);
 160
 161static int __netlink_remove_tap(struct netlink_tap *nt)
 162{
 163	bool found = false;
 164	struct netlink_tap *tmp;
 165
 166	spin_lock(&netlink_tap_lock);
 167
 168	list_for_each_entry(tmp, &netlink_tap_all, list) {
 169		if (nt == tmp) {
 170			list_del_rcu(&nt->list);
 171			found = true;
 172			goto out;
 173		}
 174	}
 175
 176	pr_warn("__netlink_remove_tap: %p not found\n", nt);
 177out:
 178	spin_unlock(&netlink_tap_lock);
 179
 180	if (found)
 181		module_put(nt->module);
 182
 183	return found ? 0 : -ENODEV;
 184}
 185
 186int netlink_remove_tap(struct netlink_tap *nt)
 187{
 188	int ret;
 189
 190	ret = __netlink_remove_tap(nt);
 191	synchronize_net();
 192
 193	return ret;
 194}
 195EXPORT_SYMBOL_GPL(netlink_remove_tap);
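/* Usage sketch (not part of this file): this mirrors how a tap driver
 * such as nlmon hooks itself in. netlink_add_tap() insists on a
 * net_device of type ARPHRD_NETLINK and pins the owning module;
 * netlink_remove_tap() additionally waits for RCU readers via
 * synchronize_net(). "example_tap"/"example_register_tap" are
 * illustrative names only.
 */
static struct netlink_tap example_tap;

static int example_register_tap(struct net_device *nl_dev)
{
	example_tap.dev = nl_dev;		/* must be ARPHRD_NETLINK */
	example_tap.module = THIS_MODULE;	/* held while the tap is listed */
	return netlink_add_tap(&example_tap);
}

static void example_unregister_tap(void)
{
	netlink_remove_tap(&example_tap);
}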
 196
 197static bool netlink_filter_tap(const struct sk_buff *skb)
 198{
 199	struct sock *sk = skb->sk;
 200
 201	/* We take the more conservative approach and
 202	 * whitelist socket protocols that may pass.
 203	 */
 204	switch (sk->sk_protocol) {
 205	case NETLINK_ROUTE:
 206	case NETLINK_USERSOCK:
 207	case NETLINK_SOCK_DIAG:
 208	case NETLINK_NFLOG:
 209	case NETLINK_XFRM:
 210	case NETLINK_FIB_LOOKUP:
 211	case NETLINK_NETFILTER:
 212	case NETLINK_GENERIC:
 213		return true;
 214	}
 215
 216	return false;
 217}
 218
 219static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 220				     struct net_device *dev)
 221{
 222	struct sk_buff *nskb;
 223	struct sock *sk = skb->sk;
 224	int ret = -ENOMEM;
 225
 226	dev_hold(dev);
 227
 228	if (is_vmalloc_addr(skb->head))
 229		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
 230	else
 231		nskb = skb_clone(skb, GFP_ATOMIC);
 232	if (nskb) {
 233		nskb->dev = dev;
 234		nskb->protocol = htons((u16) sk->sk_protocol);
 235		nskb->pkt_type = netlink_is_kernel(sk) ?
 236				 PACKET_KERNEL : PACKET_USER;
 237		skb_reset_network_header(nskb);
 238		ret = dev_queue_xmit(nskb);
 239		if (unlikely(ret > 0))
 240			ret = net_xmit_errno(ret);
 241	}
 242
 243	dev_put(dev);
 244	return ret;
 245}
 246
 247static void __netlink_deliver_tap(struct sk_buff *skb)
 248{
 249	int ret;
 250	struct netlink_tap *tmp;
 251
 252	if (!netlink_filter_tap(skb))
 253		return;
 254
 255	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 256		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 257		if (unlikely(ret))
 258			break;
 259	}
 260}
 261
 262static void netlink_deliver_tap(struct sk_buff *skb)
 263{
 264	rcu_read_lock();
 265
 266	if (unlikely(!list_empty(&netlink_tap_all)))
 267		__netlink_deliver_tap(skb);
 268
 269	rcu_read_unlock();
 270}
 271
 272static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
 273				       struct sk_buff *skb)
 274{
 275	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
 276		netlink_deliver_tap(skb);
 277}
 278
 279static void netlink_overrun(struct sock *sk)
 280{
 281	struct netlink_sock *nlk = nlk_sk(sk);
 282
 283	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
 284		if (!test_and_set_bit(NETLINK_S_CONGESTED,
 285				      &nlk_sk(sk)->state)) {
 286			sk->sk_err = ENOBUFS;
 287			sk->sk_error_report(sk);
 288		}
 289	}
 290	atomic_inc(&sk->sk_drops);
 291}
 292
 293static void netlink_rcv_wake(struct sock *sk)
 294{
 295	struct netlink_sock *nlk = nlk_sk(sk);
 296
 297	if (skb_queue_empty(&sk->sk_receive_queue))
 298		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
 299	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
 300		wake_up_interruptible(&nlk->wait);
 301}
 302
 302
 303static void netlink_skb_destructor(struct sk_buff *skb)
 304{
 305	if (is_vmalloc_addr(skb->head)) {
 306		if (!skb->cloned ||
 307		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
 308			vfree(skb->head);
 309
 310		skb->head = NULL;
 311	}
 312	if (skb->sk != NULL)
 313		sock_rfree(skb);
 314}
 315
 316static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 317{
 318	WARN_ON(skb->sk != NULL);
 319	skb->sk = sk;
 320	skb->destructor = netlink_skb_destructor;
 321	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 322	sk_mem_charge(sk, skb->truesize);
 323}
 324
 325static void netlink_sock_destruct(struct sock *sk)
 326{
 327	struct netlink_sock *nlk = nlk_sk(sk);
 328
 329	if (nlk->cb_running) {
 330		if (nlk->cb.done)
 331			nlk->cb.done(&nlk->cb);
 332		module_put(nlk->cb.module);
 333		kfree_skb(nlk->cb.skb);
 334	}
 335
 336	skb_queue_purge(&sk->sk_receive_queue);
 337
 338	if (!sock_flag(sk, SOCK_DEAD)) {
 339		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
 340		return;
 341	}
 342
 343	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
 344	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 345	WARN_ON(nlk_sk(sk)->groups);
 346}
 347
 348static void netlink_sock_destruct_work(struct work_struct *work)
 349{
 350	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
 351						work);
 352
 353	sk_free(&nlk->sk);
 354}
 355
 356/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 357 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 358 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 359 * this, _but_ remember, it adds useless work on UP machines.
 360 */
 361
 362void netlink_table_grab(void)
 363	__acquires(nl_table_lock)
 364{
 365	might_sleep();
 366
 367	write_lock_irq(&nl_table_lock);
 368
 369	if (atomic_read(&nl_table_users)) {
 370		DECLARE_WAITQUEUE(wait, current);
 371
 372		add_wait_queue_exclusive(&nl_table_wait, &wait);
 373		for (;;) {
 374			set_current_state(TASK_UNINTERRUPTIBLE);
 375			if (atomic_read(&nl_table_users) == 0)
 376				break;
 377			write_unlock_irq(&nl_table_lock);
 378			schedule();
 379			write_lock_irq(&nl_table_lock);
 380		}
 381
 382		__set_current_state(TASK_RUNNING);
 383		remove_wait_queue(&nl_table_wait, &wait);
 384	}
 385}
 386
 387void netlink_table_ungrab(void)
 388	__releases(nl_table_lock)
 389{
 390	write_unlock_irq(&nl_table_lock);
 391	wake_up(&nl_table_wait);
 392}
 393
 394static inline void
 395netlink_lock_table(void)
 396{
 397	/* read_lock() synchronizes us to netlink_table_grab */
 398
 399	read_lock(&nl_table_lock);
 400	atomic_inc(&nl_table_users);
 401	read_unlock(&nl_table_lock);
 402}
 403
 404static inline void
 405netlink_unlock_table(void)
 406{
 407	if (atomic_dec_and_test(&nl_table_users))
 408		wake_up(&nl_table_wait);
 409}
 410
 411struct netlink_compare_arg
 412{
 413	possible_net_t pnet;
 414	u32 portid;
 415};
 416
 417/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
 418#define netlink_compare_arg_len \
 419	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
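/* Illustration (assumption, not in the original file): with CONFIG_NET_NS
 * on a 64-bit build, @pnet is a pointer at offset 0 and @portid a u32 at
 * offset 8, so netlink_compare_arg_len is 12 while sizeof() pads the
 * struct to 16. A hypothetical compile-time check of the relationship the
 * hash key relies on:
 */
static inline void netlink_compare_arg_len_check(void)
{
	BUILD_BUG_ON(netlink_compare_arg_len >
		     sizeof(struct netlink_compare_arg));
}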
 420
 421static inline int netlink_compare(struct rhashtable_compare_arg *arg,
 422				  const void *ptr)
 423{
 424	const struct netlink_compare_arg *x = arg->key;
 425	const struct netlink_sock *nlk = ptr;
 426
 427	return nlk->portid != x->portid ||
 428	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
 429}
 430
 431static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
 432				     struct net *net, u32 portid)
 433{
 434	memset(arg, 0, sizeof(*arg));
 435	write_pnet(&arg->pnet, net);
 436	arg->portid = portid;
 437}
 438
 439static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
 440				     struct net *net)
 441{
 442	struct netlink_compare_arg arg;
 443
 444	netlink_compare_arg_init(&arg, net, portid);
 445	return rhashtable_lookup_fast(&table->hash, &arg,
 446				      netlink_rhashtable_params);
 447}
 448
 449static int __netlink_insert(struct netlink_table *table, struct sock *sk)
 450{
 451	struct netlink_compare_arg arg;
 452
 453	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
 454	return rhashtable_lookup_insert_key(&table->hash, &arg,
 455					    &nlk_sk(sk)->node,
 456					    netlink_rhashtable_params);
 457}
 458
 459static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 460{
 461	struct netlink_table *table = &nl_table[protocol];
 462	struct sock *sk;
 463
 464	rcu_read_lock();
 465	sk = __netlink_lookup(table, portid, net);
 466	if (sk)
 467		sock_hold(sk);
 468	rcu_read_unlock();
 469
 470	return sk;
 471}
 472
 473static const struct proto_ops netlink_ops;
 474
 475static void
 476netlink_update_listeners(struct sock *sk)
 477{
 478	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
 479	unsigned long mask;
 480	unsigned int i;
 481	struct listeners *listeners;
 482
 483	listeners = nl_deref_protected(tbl->listeners);
 484	if (!listeners)
 485		return;
 486
 487	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 488		mask = 0;
 489		sk_for_each_bound(sk, &tbl->mc_list) {
 490			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
 491				mask |= nlk_sk(sk)->groups[i];
 492		}
 493		listeners->masks[i] = mask;
 494	}
 495	/* this function is only called with the netlink table "grabbed", which
 496	 * makes sure updates are visible before bind or setsockopt return. */
 497}
 498
 499static int netlink_insert(struct sock *sk, u32 portid)
 500{
 501	struct netlink_table *table = &nl_table[sk->sk_protocol];
 502	int err;
 503
 504	lock_sock(sk);
 505
 506	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
 507	if (nlk_sk(sk)->bound)
 508		goto err;
 509
 510	err = -ENOMEM;
 511	if (BITS_PER_LONG > 32 &&
 512	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
 513		goto err;
 514
 515	nlk_sk(sk)->portid = portid;
 516	sock_hold(sk);
 517
 518	err = __netlink_insert(table, sk);
 519	if (err) {
 520		/* In case the hashtable backend returns with -EBUSY
 521		 * from here, it must not escape to the caller.
 522		 */
 523		if (unlikely(err == -EBUSY))
 524			err = -EOVERFLOW;
 525		if (err == -EEXIST)
 526			err = -EADDRINUSE;
 527		sock_put(sk);
 528		goto err;
 529	}
 530
 531	/* We need to ensure that the socket is hashed and visible. */
 532	smp_wmb();
 533	nlk_sk(sk)->bound = portid;
 534
 535err:
 536	release_sock(sk);
 537	return err;
 538}
 539
 540static void netlink_remove(struct sock *sk)
 541{
 542	struct netlink_table *table;
 543
 544	table = &nl_table[sk->sk_protocol];
 545	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
 546				    netlink_rhashtable_params)) {
 547		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
 548		__sock_put(sk);
 549	}
 550
 551	netlink_table_grab();
 552	if (nlk_sk(sk)->subscriptions) {
 553		__sk_del_bind_node(sk);
 554		netlink_update_listeners(sk);
 555	}
 556	if (sk->sk_protocol == NETLINK_GENERIC)
 557		atomic_inc(&genl_sk_destructing_cnt);
 558	netlink_table_ungrab();
 559}
 560
 561static struct proto netlink_proto = {
 562	.name	  = "NETLINK",
 563	.owner	  = THIS_MODULE,
 564	.obj_size = sizeof(struct netlink_sock),
 565};
 566
 567static int __netlink_create(struct net *net, struct socket *sock,
 568			    struct mutex *cb_mutex, int protocol,
 569			    int kern)
 570{
 571	struct sock *sk;
 572	struct netlink_sock *nlk;
 573
 574	sock->ops = &netlink_ops;
 575
 576	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
 577	if (!sk)
 578		return -ENOMEM;
 579
 580	sock_init_data(sock, sk);
 581
 582	nlk = nlk_sk(sk);
 583	if (cb_mutex) {
 584		nlk->cb_mutex = cb_mutex;
 585	} else {
 586		nlk->cb_mutex = &nlk->cb_def_mutex;
 587		mutex_init(nlk->cb_mutex);
 588	}
 589	init_waitqueue_head(&nlk->wait);
 590
 591	sk->sk_destruct = netlink_sock_destruct;
 592	sk->sk_protocol = protocol;
 593	return 0;
 594}
 595
 596static int netlink_create(struct net *net, struct socket *sock, int protocol,
 597			  int kern)
 598{
 599	struct module *module = NULL;
 600	struct mutex *cb_mutex;
 601	struct netlink_sock *nlk;
 602	int (*bind)(struct net *net, int group);
 603	void (*unbind)(struct net *net, int group);
 604	int err = 0;
 605
 606	sock->state = SS_UNCONNECTED;
 607
 608	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 609		return -ESOCKTNOSUPPORT;
 610
 611	if (protocol < 0 || protocol >= MAX_LINKS)
 612		return -EPROTONOSUPPORT;
 613
 614	netlink_lock_table();
 615#ifdef CONFIG_MODULES
 616	if (!nl_table[protocol].registered) {
 617		netlink_unlock_table();
 618		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
 619		netlink_lock_table();
 620	}
 621#endif
 622	if (nl_table[protocol].registered &&
 623	    try_module_get(nl_table[protocol].module))
 624		module = nl_table[protocol].module;
 625	else
 626		err = -EPROTONOSUPPORT;
 627	cb_mutex = nl_table[protocol].cb_mutex;
 628	bind = nl_table[protocol].bind;
 629	unbind = nl_table[protocol].unbind;
 630	netlink_unlock_table();
 631
 632	if (err < 0)
 633		goto out;
 634
 635	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
 636	if (err < 0)
 637		goto out_module;
 638
 639	local_bh_disable();
 640	sock_prot_inuse_add(net, &netlink_proto, 1);
 641	local_bh_enable();
 642
 643	nlk = nlk_sk(sock->sk);
 644	nlk->module = module;
 645	nlk->netlink_bind = bind;
 646	nlk->netlink_unbind = unbind;
 647out:
 648	return err;
 649
 650out_module:
 651	module_put(module);
 652	goto out;
 653}
 654
 655static void deferred_put_nlk_sk(struct rcu_head *head)
 656{
 657	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
 658	struct sock *sk = &nlk->sk;
 659
 660	if (!atomic_dec_and_test(&sk->sk_refcnt))
 661		return;
 662
 663	if (nlk->cb_running && nlk->cb.done) {
 664		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
 665		schedule_work(&nlk->work);
 666		return;
 667	}
 668
 669	sk_free(sk);
 670}
 671
 672static int netlink_release(struct socket *sock)
 673{
 674	struct sock *sk = sock->sk;
 675	struct netlink_sock *nlk;
 676
 677	if (!sk)
 678		return 0;
 679
 680	netlink_remove(sk);
 681	sock_orphan(sk);
 682	nlk = nlk_sk(sk);
 683
 684	/*
 685	 * OK. Socket is unlinked, any packets that arrive now
 686	 * will be purged.
 687	 */
 688
 689	/* must not acquire netlink_table_lock in any way again before unbind
 690	 * and notifying genetlink is done as otherwise it might deadlock
 691	 */
 692	if (nlk->netlink_unbind) {
 693		int i;
 694
 695		for (i = 0; i < nlk->ngroups; i++)
 696			if (test_bit(i, nlk->groups))
 697				nlk->netlink_unbind(sock_net(sk), i + 1);
 698	}
 699	if (sk->sk_protocol == NETLINK_GENERIC &&
 700	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
 701		wake_up(&genl_sk_destructing_waitq);
 702
 703	sock->sk = NULL;
 704	wake_up_interruptible_all(&nlk->wait);
 705
 706	skb_queue_purge(&sk->sk_write_queue);
 707
 708	if (nlk->portid && nlk->bound) {
 709		struct netlink_notify n = {
 710						.net = sock_net(sk),
 711						.protocol = sk->sk_protocol,
 712						.portid = nlk->portid,
 713					  };
 714		blocking_notifier_call_chain(&netlink_chain,
 715				NETLINK_URELEASE, &n);
 716	}
 717
 718	module_put(nlk->module);
 719
 720	if (netlink_is_kernel(sk)) {
 721		netlink_table_grab();
 722		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
 723		if (--nl_table[sk->sk_protocol].registered == 0) {
 724			struct listeners *old;
 725
 726			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
 727			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
 728			kfree_rcu(old, rcu);
 729			nl_table[sk->sk_protocol].module = NULL;
 730			nl_table[sk->sk_protocol].bind = NULL;
 731			nl_table[sk->sk_protocol].unbind = NULL;
 732			nl_table[sk->sk_protocol].flags = 0;
 733			nl_table[sk->sk_protocol].registered = 0;
 734		}
 735		netlink_table_ungrab();
 736	}
 737
 738	kfree(nlk->groups);
 739	nlk->groups = NULL;
 740
 741	local_bh_disable();
 742	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
 743	local_bh_enable();
 744	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
 745	return 0;
 746}
 747
 748static int netlink_autobind(struct socket *sock)
 749{
 750	struct sock *sk = sock->sk;
 751	struct net *net = sock_net(sk);
 752	struct netlink_table *table = &nl_table[sk->sk_protocol];
 753	s32 portid = task_tgid_vnr(current);
 754	int err;
 755	s32 rover = -4096;
 756	bool ok;
 757
 758retry:
 759	cond_resched();
 760	rcu_read_lock();
 761	ok = !__netlink_lookup(table, portid, net);
 762	rcu_read_unlock();
 763	if (!ok) {
 764		/* Bind collision, search negative portid values. */
 765		if (rover == -4096)
 766			/* rover will be in range [S32_MIN, -4097] */
 767			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
 768		else if (rover >= -4096)
 769			rover = -4097;
 770		portid = rover--;
 771		goto retry;
 772	}
 773
 774	err = netlink_insert(sk, portid);
 775	if (err == -EADDRINUSE)
 776		goto retry;
 777
 778	/* If 2 threads race to autobind, that is fine.  */
 779	if (err == -EBUSY)
 780		err = 0;
 781
 782	return err;
 783}
 784
 785/**
 786 * __netlink_ns_capable - General netlink message capability test
 787 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 788 * @user_ns: The user namespace of the capability to use
 789 * @cap: The capability to use
 790 *
 791 * Test to see if the opener of the socket we received the message
 792 * from had the capability @cap when the netlink socket was created
 793 * and that the sender of the message has @cap in @user_ns.
 794 */
 795bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
 796			struct user_namespace *user_ns, int cap)
 797{
 798	return ((nsp->flags & NETLINK_SKB_DST) ||
 799		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
 800		ns_capable(user_ns, cap);
 801}
 802EXPORT_SYMBOL(__netlink_ns_capable);
 803
 804/**
 805 * netlink_ns_capable - General netlink message capability test
 806 * @skb: socket buffer holding a netlink command from userspace
 807 * @user_ns: The user namespace of the capability to use
 808 * @cap: The capability to use
 809 *
 810 * Test to see if the opener of the socket we received the message
 811 * from had the capability @cap when the netlink socket was created
 812 * and that the sender of the message has @cap in @user_ns.
 813 */
 814bool netlink_ns_capable(const struct sk_buff *skb,
 815			struct user_namespace *user_ns, int cap)
 816{
 817	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
 818}
 819EXPORT_SYMBOL(netlink_ns_capable);
 820
 821/**
 822 * netlink_capable - Netlink global message capability test
 823 * @skb: socket buffer holding a netlink command from userspace
 824 * @cap: The capability to use
 825 *
 826 * Test to see if the opener of the socket we received the message
 827 * from had the capability @cap when the netlink socket was created
 828 * and that the sender of the message has @cap in all user namespaces.
 829 */
 830bool netlink_capable(const struct sk_buff *skb, int cap)
 831{
 832	return netlink_ns_capable(skb, &init_user_ns, cap);
 833}
 834EXPORT_SYMBOL(netlink_capable);
 835
 836/**
 837 * netlink_net_capable - Netlink network namespace message capability test
 838 * @skb: socket buffer holding a netlink command from userspace
 839 * @cap: The capability to use
 840 *
 841 * Test to see if the opener of the socket we received the message
 842 * from had the capability @cap when the netlink socket was created
 843 * and that the sender of the message has @cap over the network
 844 * namespace of the socket we received the message from.
 845 */
 846bool netlink_net_capable(const struct sk_buff *skb, int cap)
 847{
 848	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
 849}
 850EXPORT_SYMBOL(netlink_net_capable);
 851
 852static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
 853{
 854	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
 855		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
 856}
 857
 858static void
 859netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
 860{
 861	struct netlink_sock *nlk = nlk_sk(sk);
 862
 863	if (nlk->subscriptions && !subscriptions)
 864		__sk_del_bind_node(sk);
 865	else if (!nlk->subscriptions && subscriptions)
 866		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
 867	nlk->subscriptions = subscriptions;
 868}
 869
 870static int netlink_realloc_groups(struct sock *sk)
 871{
 872	struct netlink_sock *nlk = nlk_sk(sk);
 873	unsigned int groups;
 874	unsigned long *new_groups;
 875	int err = 0;
 876
 877	netlink_table_grab();
 878
 879	groups = nl_table[sk->sk_protocol].groups;
 880	if (!nl_table[sk->sk_protocol].registered) {
 881		err = -ENOENT;
 882		goto out_unlock;
 883	}
 884
 885	if (nlk->ngroups >= groups)
 886		goto out_unlock;
 887
 888	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
 889	if (new_groups == NULL) {
 890		err = -ENOMEM;
 891		goto out_unlock;
 892	}
 893	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
 894	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
 895
 896	nlk->groups = new_groups;
 897	nlk->ngroups = groups;
 898 out_unlock:
 899	netlink_table_ungrab();
 900	return err;
 901}
 902
 903static void netlink_undo_bind(int group, long unsigned int groups,
 904			      struct sock *sk)
 905{
 906	struct netlink_sock *nlk = nlk_sk(sk);
 907	int undo;
 908
 909	if (!nlk->netlink_unbind)
 910		return;
 911
 912	for (undo = 0; undo < group; undo++)
 913		if (test_bit(undo, &groups))
 914			nlk->netlink_unbind(sock_net(sk), undo + 1);
 915}
 916
 917static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 918			int addr_len)
 919{
 920	struct sock *sk = sock->sk;
 921	struct net *net = sock_net(sk);
 922	struct netlink_sock *nlk = nlk_sk(sk);
 923	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 924	int err;
 925	long unsigned int groups = nladdr->nl_groups;
 926	bool bound;
 927
 928	if (addr_len < sizeof(struct sockaddr_nl))
 929		return -EINVAL;
 930
 931	if (nladdr->nl_family != AF_NETLINK)
 932		return -EINVAL;
 933
 934	/* Only superuser is allowed to listen to multicasts */
 935	if (groups) {
 936		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
 937			return -EPERM;
 938		err = netlink_realloc_groups(sk);
 939		if (err)
 940			return err;
 941	}
 942
 943	bound = nlk->bound;
 944	if (bound) {
 945		/* Ensure nlk->portid is up-to-date. */
 946		smp_rmb();
 947
 948		if (nladdr->nl_pid != nlk->portid)
 949			return -EINVAL;
 950	}
 951
 952	if (nlk->netlink_bind && groups) {
 953		int group;
 954
 955		for (group = 0; group < nlk->ngroups; group++) {
 956			if (!test_bit(group, &groups))
 957				continue;
 958			err = nlk->netlink_bind(net, group + 1);
 959			if (!err)
 960				continue;
 961			netlink_undo_bind(group, groups, sk);
 962			return err;
 963		}
 964	}
 965
 966	/* No need for barriers here as we return to user-space without
 967	 * using any of the bound attributes.
 968	 */
 969	if (!bound) {
 970		err = nladdr->nl_pid ?
 971			netlink_insert(sk, nladdr->nl_pid) :
 972			netlink_autobind(sock);
 973		if (err) {
 974			netlink_undo_bind(nlk->ngroups, groups, sk);
 975			return err;
 976		}
 977	}
 978
 979	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
 980		return 0;
 981
 982	netlink_table_grab();
 983	netlink_update_subscriptions(sk, nlk->subscriptions +
 984					 hweight32(groups) -
 985					 hweight32(nlk->groups[0]));
 986	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
 987	netlink_update_listeners(sk);
 988	netlink_table_ungrab();
 989
 990	return 0;
 991}
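/* Userspace view (sketch, with assumed buffer/group choices): the bind()
 * path above is what a monitoring process exercises when it subscribes
 * via nl_groups. Only the first 32 groups fit in that mask; higher
 * groups need NETLINK_ADD_MEMBERSHIP (see netlink_setsockopt() below).
 */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_open_rtnl_listener(void)
{
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_pid    = 0,			/* 0: kernel autobinds a portid */
		.nl_groups = RTMGRP_LINK,	/* link up/down notifications */
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}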
 992
 993static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 994			   int alen, int flags)
 995{
 996	int err = 0;
 997	struct sock *sk = sock->sk;
 998	struct netlink_sock *nlk = nlk_sk(sk);
 999	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1000
1001	if (alen < sizeof(addr->sa_family))
1002		return -EINVAL;
1003
1004	if (addr->sa_family == AF_UNSPEC) {
1005		sk->sk_state	= NETLINK_UNCONNECTED;
1006		nlk->dst_portid	= 0;
1007		nlk->dst_group  = 0;
1008		return 0;
1009	}
1010	if (addr->sa_family != AF_NETLINK)
1011		return -EINVAL;
1012
1013	if ((nladdr->nl_groups || nladdr->nl_pid) &&
1014	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1015		return -EPERM;
1016
1017	/* No need for barriers here as we return to user-space without
1018	 * using any of the bound attributes.
1019	 */
1020	if (!nlk->bound)
1021		err = netlink_autobind(sock);
1022
1023	if (err == 0) {
1024		sk->sk_state	= NETLINK_CONNECTED;
1025		nlk->dst_portid = nladdr->nl_pid;
1026		nlk->dst_group  = ffs(nladdr->nl_groups);
1027	}
1028
1029	return err;
1030}
1031
1032static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1033			   int *addr_len, int peer)
1034{
1035	struct sock *sk = sock->sk;
1036	struct netlink_sock *nlk = nlk_sk(sk);
1037	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1038
1039	nladdr->nl_family = AF_NETLINK;
1040	nladdr->nl_pad = 0;
1041	*addr_len = sizeof(*nladdr);
1042
1043	if (peer) {
1044		nladdr->nl_pid = nlk->dst_portid;
1045		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1046	} else {
1047		nladdr->nl_pid = nlk->portid;
1048		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1049	}
1050	return 0;
1051}
1052
1053static int netlink_ioctl(struct socket *sock, unsigned int cmd,
1054			 unsigned long arg)
1055{
1056	/* try to hand this ioctl down to the NIC drivers.
1057	 */
1058	return -ENOIOCTLCMD;
1059}
1060
1061static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1062{
1063	struct sock *sock;
1064	struct netlink_sock *nlk;
1065
1066	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1067	if (!sock)
1068		return ERR_PTR(-ECONNREFUSED);
1069
1070	/* Don't bother queuing skb if kernel socket has no input function */
1071	nlk = nlk_sk(sock);
1072	if (sock->sk_state == NETLINK_CONNECTED &&
1073	    nlk->dst_portid != nlk_sk(ssk)->portid) {
1074		sock_put(sock);
1075		return ERR_PTR(-ECONNREFUSED);
1076	}
1077	return sock;
1078}
1079
1080struct sock *netlink_getsockbyfilp(struct file *filp)
1081{
1082	struct inode *inode = file_inode(filp);
1083	struct sock *sock;
1084
1085	if (!S_ISSOCK(inode->i_mode))
1086		return ERR_PTR(-ENOTSOCK);
1087
1088	sock = SOCKET_I(inode)->sk;
1089	if (sock->sk_family != AF_NETLINK)
1090		return ERR_PTR(-EINVAL);
1091
1092	sock_hold(sock);
1093	return sock;
1094}
1095
1096static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1097					       int broadcast)
1098{
1099	struct sk_buff *skb;
1100	void *data;
1101
1102	if (size <= NLMSG_GOODSIZE || broadcast)
1103		return alloc_skb(size, GFP_KERNEL);
1104
1105	size = SKB_DATA_ALIGN(size) +
1106	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1107
1108	data = vmalloc(size);
1109	if (data == NULL)
1110		return NULL;
1111
1112	skb = __build_skb(data, size);
1113	if (skb == NULL)
1114		vfree(data);
1115	else
1116		skb->destructor = netlink_skb_destructor;
1117
1118	return skb;
1119}
1120
1121/*
1122 * Attach a skb to a netlink socket.
1123 * The caller must hold a reference to the destination socket. On error, the
1124 * reference is dropped. The skb is not sent to the destination; all
1125 * error checks are performed and memory in the queue is reserved.
1126 * Return values:
1127 * < 0: error. skb freed, reference to sock dropped.
1128 * 0: continue
1129 * 1: repeat lookup - reference dropped while waiting for socket memory.
1130 */
1131int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1132		      long *timeo, struct sock *ssk)
1133{
1134	struct netlink_sock *nlk;
1135
1136	nlk = nlk_sk(sk);
1137
1138	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1139	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
1140		DECLARE_WAITQUEUE(wait, current);
1141		if (!*timeo) {
1142			if (!ssk || netlink_is_kernel(ssk))
1143				netlink_overrun(sk);
1144			sock_put(sk);
1145			kfree_skb(skb);
1146			return -EAGAIN;
1147		}
1148
1149		__set_current_state(TASK_INTERRUPTIBLE);
1150		add_wait_queue(&nlk->wait, &wait);
1151
1152		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1153		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1154		    !sock_flag(sk, SOCK_DEAD))
1155			*timeo = schedule_timeout(*timeo);
1156
1157		__set_current_state(TASK_RUNNING);
1158		remove_wait_queue(&nlk->wait, &wait);
1159		sock_put(sk);
1160
1161		if (signal_pending(current)) {
1162			kfree_skb(skb);
1163			return sock_intr_errno(*timeo);
1164		}
1165		return 1;
1166	}
1167	netlink_skb_set_owner_r(skb, sk);
1168	return 0;
1169}
1170
1171static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1172{
1173	int len = skb->len;
1174
1175	netlink_deliver_tap(skb);
1176
1177	skb_queue_tail(&sk->sk_receive_queue, skb);
1178	sk->sk_data_ready(sk);
1179	return len;
1180}
1181
1182int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1183{
1184	int len = __netlink_sendskb(sk, skb);
1185
1186	sock_put(sk);
1187	return len;
1188}
1189
1190void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1191{
1192	kfree_skb(skb);
1193	sock_put(sk);
1194}
1195
1196static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1197{
1198	int delta;
1199
1200	WARN_ON(skb->sk != NULL);
1201	delta = skb->end - skb->tail;
1202	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1203		return skb;
1204
1205	if (skb_shared(skb)) {
1206		struct sk_buff *nskb = skb_clone(skb, allocation);
1207		if (!nskb)
1208			return skb;
1209		consume_skb(skb);
1210		skb = nskb;
1211	}
1212
1213	if (!pskb_expand_head(skb, 0, -delta, allocation))
1214		skb->truesize -= delta;
1215
1216	return skb;
1217}
1218
1219static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1220				  struct sock *ssk)
1221{
1222	int ret;
1223	struct netlink_sock *nlk = nlk_sk(sk);
1224
1225	ret = -ECONNREFUSED;
1226	if (nlk->netlink_rcv != NULL) {
1227		ret = skb->len;
1228		netlink_skb_set_owner_r(skb, sk);
1229		NETLINK_CB(skb).sk = ssk;
1230		netlink_deliver_tap_kernel(sk, ssk, skb);
1231		nlk->netlink_rcv(skb);
1232		consume_skb(skb);
1233	} else {
1234		kfree_skb(skb);
1235	}
1236	sock_put(sk);
1237	return ret;
1238}
1239
1240int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1241		    u32 portid, int nonblock)
1242{
1243	struct sock *sk;
1244	int err;
1245	long timeo;
1246
1247	skb = netlink_trim(skb, gfp_any());
1248
1249	timeo = sock_sndtimeo(ssk, nonblock);
1250retry:
1251	sk = netlink_getsockbyportid(ssk, portid);
1252	if (IS_ERR(sk)) {
1253		kfree_skb(skb);
1254		return PTR_ERR(sk);
1255	}
1256	if (netlink_is_kernel(sk))
1257		return netlink_unicast_kernel(sk, skb, ssk);
1258
1259	if (sk_filter(sk, skb)) {
1260		err = skb->len;
1261		kfree_skb(skb);
1262		sock_put(sk);
1263		return err;
1264	}
1265
1266	err = netlink_attachskb(sk, skb, &timeo, ssk);
1267	if (err == 1)
1268		goto retry;
1269	if (err)
1270		return err;
1271
1272	return netlink_sendskb(sk, skb);
1273}
1274EXPORT_SYMBOL(netlink_unicast);
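/* Kernel-side usage sketch (assumed names, minimal error handling): a
 * typical input callback answers the sender with netlink_unicast(),
 * which consumes the skb on both success and failure. MSG_DONTWAIT
 * keeps the send path from sleeping here.
 */
static void example_reply(struct sock *kernel_sk, struct nlmsghdr *req)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	u32 portid = req->nlmsg_pid;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;
	rep = nlmsg_put(skb, portid, req->nlmsg_seq, NLMSG_DONE, 0, 0);
	if (!rep) {
		kfree_skb(skb);
		return;
	}
	netlink_unicast(kernel_sk, skb, portid, MSG_DONTWAIT);
}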
1275
1276int netlink_has_listeners(struct sock *sk, unsigned int group)
1277{
1278	int res = 0;
1279	struct listeners *listeners;
1280
1281	BUG_ON(!netlink_is_kernel(sk));
1282
1283	rcu_read_lock();
1284	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1285
1286	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1287		res = test_bit(group - 1, listeners->masks);
1288
1289	rcu_read_unlock();
1290
1291	return res;
1292}
1293EXPORT_SYMBOL_GPL(netlink_has_listeners);
1294
1295static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1296{
1297	struct netlink_sock *nlk = nlk_sk(sk);
1298
1299	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1300	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1301		netlink_skb_set_owner_r(skb, sk);
1302		__netlink_sendskb(sk, skb);
1303		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1304	}
1305	return -1;
1306}
1307
1308struct netlink_broadcast_data {
1309	struct sock *exclude_sk;
1310	struct net *net;
1311	u32 portid;
1312	u32 group;
1313	int failure;
1314	int delivery_failure;
1315	int congested;
1316	int delivered;
1317	gfp_t allocation;
1318	struct sk_buff *skb, *skb2;
1319	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1320	void *tx_data;
1321};
1322
1323static void do_one_broadcast(struct sock *sk,
1324				    struct netlink_broadcast_data *p)
1325{
1326	struct netlink_sock *nlk = nlk_sk(sk);
1327	int val;
1328
1329	if (p->exclude_sk == sk)
1330		return;
1331
1332	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1333	    !test_bit(p->group - 1, nlk->groups))
1334		return;
1335
1336	if (!net_eq(sock_net(sk), p->net)) {
1337		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1338			return;
1339
1340		if (!peernet_has_id(sock_net(sk), p->net))
1341			return;
1342
1343		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1344				     CAP_NET_BROADCAST))
1345			return;
1346	}
1347
1348	if (p->failure) {
1349		netlink_overrun(sk);
1350		return;
1351	}
1352
1353	sock_hold(sk);
1354	if (p->skb2 == NULL) {
1355		if (skb_shared(p->skb)) {
1356			p->skb2 = skb_clone(p->skb, p->allocation);
1357		} else {
1358			p->skb2 = skb_get(p->skb);
1359			/*
1360			 * skb ownership may have been set when
1361			 * delivered to a previous socket.
1362			 */
1363			skb_orphan(p->skb2);
1364		}
1365	}
1366	if (p->skb2 == NULL) {
1367		netlink_overrun(sk);
1368		/* Clone failed. Notify ALL listeners. */
1369		p->failure = 1;
1370		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1371			p->delivery_failure = 1;
1372		goto out;
1373	}
1374	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1375		kfree_skb(p->skb2);
1376		p->skb2 = NULL;
1377		goto out;
1378	}
1379	if (sk_filter(sk, p->skb2)) {
1380		kfree_skb(p->skb2);
1381		p->skb2 = NULL;
1382		goto out;
1383	}
1384	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1385	NETLINK_CB(p->skb2).nsid_is_set = true;
1386	val = netlink_broadcast_deliver(sk, p->skb2);
1387	if (val < 0) {
1388		netlink_overrun(sk);
1389		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1390			p->delivery_failure = 1;
1391	} else {
1392		p->congested |= val;
1393		p->delivered = 1;
1394		p->skb2 = NULL;
1395	}
1396out:
1397	sock_put(sk);
1398}
1399
1400int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1401	u32 group, gfp_t allocation,
1402	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1403	void *filter_data)
1404{
1405	struct net *net = sock_net(ssk);
1406	struct netlink_broadcast_data info;
1407	struct sock *sk;
1408
1409	skb = netlink_trim(skb, allocation);
1410
1411	info.exclude_sk = ssk;
1412	info.net = net;
1413	info.portid = portid;
1414	info.group = group;
1415	info.failure = 0;
1416	info.delivery_failure = 0;
1417	info.congested = 0;
1418	info.delivered = 0;
1419	info.allocation = allocation;
1420	info.skb = skb;
1421	info.skb2 = NULL;
1422	info.tx_filter = filter;
1423	info.tx_data = filter_data;
1424
1425	/* While we sleep in clone, do not allow the socket list to change */
1426
1427	netlink_lock_table();
1428
1429	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1430		do_one_broadcast(sk, &info);
1431
1432	consume_skb(skb);
1433
1434	netlink_unlock_table();
1435
1436	if (info.delivery_failure) {
1437		kfree_skb(info.skb2);
1438		return -ENOBUFS;
1439	}
1440	consume_skb(info.skb2);
1441
1442	if (info.delivered) {
1443		if (info.congested && gfpflags_allow_blocking(allocation))
1444			yield();
1445		return 0;
1446	}
1447	return -ESRCH;
1448}
1449EXPORT_SYMBOL(netlink_broadcast_filtered);
1450
1451int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1452		      u32 group, gfp_t allocation)
1453{
1454	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1455		NULL, NULL);
1456}
1457EXPORT_SYMBOL(netlink_broadcast);
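/* Kernel-side usage sketch (assumed group/socket names): the common
 * pattern checks netlink_has_listeners() first so no skb is built when
 * the multicast group is empty, then broadcasts with portid 0 so no
 * receiver is skipped. -ESRCH from netlink_broadcast() means nobody
 * was subscribed.
 */
static void example_notify(struct sock *kernel_sk, unsigned int grp)
{
	struct sk_buff *skb;

	if (!netlink_has_listeners(kernel_sk, grp))
		return;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;
	if (!nlmsg_put(skb, 0, 0, NLMSG_DONE, 0, 0)) {
		kfree_skb(skb);
		return;
	}
	netlink_broadcast(kernel_sk, skb, 0, grp, GFP_KERNEL);
}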
1458
1459struct netlink_set_err_data {
1460	struct sock *exclude_sk;
1461	u32 portid;
1462	u32 group;
1463	int code;
1464};
1465
1466static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1467{
1468	struct netlink_sock *nlk = nlk_sk(sk);
1469	int ret = 0;
1470
1471	if (sk == p->exclude_sk)
1472		goto out;
1473
1474	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1475		goto out;
1476
1477	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1478	    !test_bit(p->group - 1, nlk->groups))
1479		goto out;
1480
1481	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1482		ret = 1;
1483		goto out;
1484	}
1485
1486	sk->sk_err = p->code;
1487	sk->sk_error_report(sk);
1488out:
1489	return ret;
1490}
1491
1492/**
1493 * netlink_set_err - report error to broadcast listeners
1494 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1495 * @portid: the PORTID of a process that we want to skip (if any)
1496 * @group: the broadcast group that will notice the error
1497 * @code: error code, must be negative (as usual in kernelspace)
1498 *
1499 * This function returns the number of broadcast listeners that have set the
1500 * NETLINK_NO_ENOBUFS socket option.
1501 */
1502int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1503{
1504	struct netlink_set_err_data info;
1505	struct sock *sk;
1506	int ret = 0;
1507
1508	info.exclude_sk = ssk;
1509	info.portid = portid;
1510	info.group = group;
1511	/* sk->sk_err wants a positive error value */
1512	info.code = -code;
1513
1514	read_lock(&nl_table_lock);
1515
1516	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1517		ret += do_one_set_err(sk, &info);
1518
1519	read_unlock(&nl_table_lock);
1520	return ret;
1521}
1522EXPORT_SYMBOL(netlink_set_err);
1523
1524/* must be called with netlink table grabbed */
1525static void netlink_update_socket_mc(struct netlink_sock *nlk,
1526				     unsigned int group,
1527				     int is_new)
1528{
1529	int old, new = !!is_new, subscriptions;
1530
1531	old = test_bit(group - 1, nlk->groups);
1532	subscriptions = nlk->subscriptions - old + new;
1533	if (new)
1534		__set_bit(group - 1, nlk->groups);
1535	else
1536		__clear_bit(group - 1, nlk->groups);
1537	netlink_update_subscriptions(&nlk->sk, subscriptions);
1538	netlink_update_listeners(&nlk->sk);
1539}
1540
1541static int netlink_setsockopt(struct socket *sock, int level, int optname,
1542			      char __user *optval, unsigned int optlen)
1543{
1544	struct sock *sk = sock->sk;
1545	struct netlink_sock *nlk = nlk_sk(sk);
1546	unsigned int val = 0;
1547	int err;
1548
1549	if (level != SOL_NETLINK)
1550		return -ENOPROTOOPT;
1551
1552	if (optlen >= sizeof(int) &&
1553	    get_user(val, (unsigned int __user *)optval))
1554		return -EFAULT;
1555
1556	switch (optname) {
1557	case NETLINK_PKTINFO:
1558		if (val)
1559			nlk->flags |= NETLINK_F_RECV_PKTINFO;
1560		else
1561			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
1562		err = 0;
1563		break;
1564	case NETLINK_ADD_MEMBERSHIP:
1565	case NETLINK_DROP_MEMBERSHIP: {
1566		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1567			return -EPERM;
1568		err = netlink_realloc_groups(sk);
1569		if (err)
1570			return err;
1571		if (!val || val - 1 >= nlk->ngroups)
1572			return -EINVAL;
1573		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
1574			err = nlk->netlink_bind(sock_net(sk), val);
1575			if (err)
1576				return err;
1577		}
1578		netlink_table_grab();
1579		netlink_update_socket_mc(nlk, val,
1580					 optname == NETLINK_ADD_MEMBERSHIP);
1581		netlink_table_ungrab();
1582		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
1583			nlk->netlink_unbind(sock_net(sk), val);
1584
1585		err = 0;
1586		break;
1587	}
1588	case NETLINK_BROADCAST_ERROR:
1589		if (val)
1590			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
1591		else
1592			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
1593		err = 0;
1594		break;
1595	case NETLINK_NO_ENOBUFS:
1596		if (val) {
1597			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
1598			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
1599			wake_up_interruptible(&nlk->wait);
1600		} else {
1601			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
1602		}
1603		err = 0;
1604		break;
1605	case NETLINK_LISTEN_ALL_NSID:
1606		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
1607			return -EPERM;
1608
1609		if (val)
1610			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
1611		else
1612			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
1613		err = 0;
1614		break;
1615	case NETLINK_CAP_ACK:
1616		if (val)
1617			nlk->flags |= NETLINK_F_CAP_ACK;
1618		else
1619			nlk->flags &= ~NETLINK_F_CAP_ACK;
1620		err = 0;
1621		break;
1622	default:
1623		err = -ENOPROTOOPT;
1624	}
1625	return err;
1626}
1627
1628static int netlink_getsockopt(struct socket *sock, int level, int optname,
1629			      char __user *optval, int __user *optlen)
1630{
1631	struct sock *sk = sock->sk;
1632	struct netlink_sock *nlk = nlk_sk(sk);
1633	int len, val, err;
1634
1635	if (level != SOL_NETLINK)
1636		return -ENOPROTOOPT;
1637
1638	if (get_user(len, optlen))
1639		return -EFAULT;
1640	if (len < 0)
1641		return -EINVAL;
1642
1643	switch (optname) {
1644	case NETLINK_PKTINFO:
1645		if (len < sizeof(int))
1646			return -EINVAL;
1647		len = sizeof(int);
1648		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
1649		if (put_user(len, optlen) ||
1650		    put_user(val, optval))
1651			return -EFAULT;
1652		err = 0;
1653		break;
1654	case NETLINK_BROADCAST_ERROR:
1655		if (len < sizeof(int))
1656			return -EINVAL;
1657		len = sizeof(int);
1658		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
1659		if (put_user(len, optlen) ||
1660		    put_user(val, optval))
1661			return -EFAULT;
1662		err = 0;
1663		break;
1664	case NETLINK_NO_ENOBUFS:
1665		if (len < sizeof(int))
1666			return -EINVAL;
1667		len = sizeof(int);
1668		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
1669		if (put_user(len, optlen) ||
1670		    put_user(val, optval))
1671			return -EFAULT;
1672		err = 0;
1673		break;
1674	case NETLINK_LIST_MEMBERSHIPS: {
1675		int pos, idx, shift;
1676
1677		err = 0;
1678		netlink_lock_table();
1679		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
1680			if (len - pos < sizeof(u32))
1681				break;
1682
1683			idx = pos / sizeof(unsigned long);
1684			shift = (pos % sizeof(unsigned long)) * 8;
1685			if (put_user((u32)(nlk->groups[idx] >> shift),
1686				     (u32 __user *)(optval + pos))) {
1687				err = -EFAULT;
1688				break;
1689			}
1690		}
1691		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
1692			err = -EFAULT;
1693		netlink_unlock_table();
1694		break;
1695	}
1696	case NETLINK_CAP_ACK:
1697		if (len < sizeof(int))
1698			return -EINVAL;
1699		len = sizeof(int);
1700		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
1701		if (put_user(len, optlen) ||
1702		    put_user(val, optval))
1703			return -EFAULT;
1704		err = 0;
1705		break;
1706	default:
1707		err = -ENOPROTOOPT;
1708	}
1709	return err;
1710}
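/* Userspace view (sketch; RTNLGRP_IPV4_ROUTE is just an example group):
 * joining any group, including those beyond the 32 reachable through
 * sockaddr_nl.nl_groups, goes through the NETLINK_ADD_MEMBERSHIP case
 * above. NETLINK_NO_ENOBUFS opts out of ENOBUFS errors on receive-queue
 * overruns.
 */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_join_group(int fd)
{
	int grp = RTNLGRP_IPV4_ROUTE;
	int one = 1;

	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		return -1;
	return setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS,
			  &one, sizeof(one));
}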
1711
1712static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1713{
1714	struct nl_pktinfo info;
1715
1716	info.group = NETLINK_CB(skb).dst_group;
1717	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1718}
1719
1720static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
1721					 struct sk_buff *skb)
1722{
1723	if (!NETLINK_CB(skb).nsid_is_set)
1724		return;
1725
1726	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
1727		 &NETLINK_CB(skb).nsid);
1728}
1729
1730static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1731{
1732	struct sock *sk = sock->sk;
1733	struct netlink_sock *nlk = nlk_sk(sk);
1734	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1735	u32 dst_portid;
1736	u32 dst_group;
1737	struct sk_buff *skb;
1738	int err;
1739	struct scm_cookie scm;
1740	u32 netlink_skb_flags = 0;
1741
1742	if (msg->msg_flags&MSG_OOB)
1743		return -EOPNOTSUPP;
1744
1745	err = scm_send(sock, msg, &scm, true);
1746	if (err < 0)
1747		return err;
1748
1749	if (msg->msg_namelen) {
1750		err = -EINVAL;
1751		if (addr->nl_family != AF_NETLINK)
1752			goto out;
1753		dst_portid = addr->nl_pid;
1754		dst_group = ffs(addr->nl_groups);
1755		err =  -EPERM;
1756		if ((dst_group || dst_portid) &&
1757		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1758			goto out;
1759		netlink_skb_flags |= NETLINK_SKB_DST;
1760	} else {
1761		dst_portid = nlk->dst_portid;
1762		dst_group = nlk->dst_group;
1763	}
1764
1765	if (!nlk->bound) {
1766		err = netlink_autobind(sock);
1767		if (err)
1768			goto out;
1769	} else {
1770		/* Ensure nlk is hashed and visible. */
1771		smp_rmb();
1772	}
1773
1774	err = -EMSGSIZE;
1775	if (len > sk->sk_sndbuf - 32)
1776		goto out;
1777	err = -ENOBUFS;
1778	skb = netlink_alloc_large_skb(len, dst_group);
1779	if (skb == NULL)
1780		goto out;
1781
1782	NETLINK_CB(skb).portid	= nlk->portid;
1783	NETLINK_CB(skb).dst_group = dst_group;
1784	NETLINK_CB(skb).creds	= scm.creds;
1785	NETLINK_CB(skb).flags	= netlink_skb_flags;
1786
1787	err = -EFAULT;
1788	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1789		kfree_skb(skb);
1790		goto out;
1791	}
1792
1793	err = security_netlink_send(sk, skb);
1794	if (err) {
1795		kfree_skb(skb);
1796		goto out;
1797	}
1798
1799	if (dst_group) {
1800		atomic_inc(&skb->users);
1801		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1802	}
1803	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1804
1805out:
1806	scm_destroy(&scm);
1807	return err;
1808}
1809
1810static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1811			   int flags)
1812{
1813	struct scm_cookie scm;
1814	struct sock *sk = sock->sk;
1815	struct netlink_sock *nlk = nlk_sk(sk);
1816	int noblock = flags&MSG_DONTWAIT;
1817	size_t copied;
1818	struct sk_buff *skb, *data_skb;
1819	int err, ret;
1820
1821	if (flags&MSG_OOB)
1822		return -EOPNOTSUPP;
1823
1824	copied = 0;
1825
1826	skb = skb_recv_datagram(sk, flags, noblock, &err);
1827	if (skb == NULL)
1828		goto out;
1829
1830	data_skb = skb;
1831
1832#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1833	if (unlikely(skb_shinfo(skb)->frag_list)) {
1834		/*
1835		 * If this skb has a frag_list, then here that means that we
1836		 * will have to use the frag_list skb's data for compat tasks
1837		 * and the regular skb's data for normal (non-compat) tasks.
1838		 *
1839		 * If we need to send the compat skb, assign it to the
1840		 * 'data_skb' variable so that it will be used below for data
1841		 * copying. We keep 'skb' for everything else, including
1842		 * freeing both later.
1843		 */
1844		if (flags & MSG_CMSG_COMPAT)
1845			data_skb = skb_shinfo(skb)->frag_list;
1846	}
1847#endif
1848
1849	/* Record the max length of recvmsg() calls for future allocations */
1850	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
1851	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
1852				     SKB_WITH_OVERHEAD(32768));
1853
1854	copied = data_skb->len;
1855	if (len < copied) {
1856		msg->msg_flags |= MSG_TRUNC;
1857		copied = len;
1858	}
1859
1860	skb_reset_transport_header(data_skb);
1861	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1862
1863	if (msg->msg_name) {
1864		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1865		addr->nl_family = AF_NETLINK;
1866		addr->nl_pad    = 0;
1867		addr->nl_pid	= NETLINK_CB(skb).portid;
1868		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1869		msg->msg_namelen = sizeof(*addr);
1870	}
1871
1872	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
1873		netlink_cmsg_recv_pktinfo(msg, skb);
1874	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
1875		netlink_cmsg_listen_all_nsid(sk, msg, skb);
1876
1877	memset(&scm, 0, sizeof(scm));
1878	scm.creds = *NETLINK_CREDS(skb);
1879	if (flags & MSG_TRUNC)
1880		copied = data_skb->len;
1881
1882	skb_free_datagram(sk, skb);
1883
1884	if (nlk->cb_running &&
1885	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1886		ret = netlink_dump(sk);
1887		if (ret) {
1888			sk->sk_err = -ret;
1889			sk->sk_error_report(sk);
1890		}
1891	}
1892
1893	scm_recv(sock, msg, &scm, flags);
1894out:
1895	netlink_rcv_wake(sk);
1896	return err ? : copied;
1897}
1898
1899static void netlink_data_ready(struct sock *sk)
1900{
1901	BUG();
1902}
1903
1904/*
1905 *	We export these functions to other modules. They provide a
1906 *	complete set of kernel non-blocking support for message
1907 *	queueing.
1908 */
1909
1910struct sock *
1911__netlink_kernel_create(struct net *net, int unit, struct module *module,
1912			struct netlink_kernel_cfg *cfg)
1913{
1914	struct socket *sock;
1915	struct sock *sk;
1916	struct netlink_sock *nlk;
1917	struct listeners *listeners = NULL;
1918	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
1919	unsigned int groups;
1920
1921	BUG_ON(!nl_table);
1922
1923	if (unit < 0 || unit >= MAX_LINKS)
1924		return NULL;
1925
1926	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1927		return NULL;
1928
1929	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
1930		goto out_sock_release_nosk;
1931
1932	sk = sock->sk;
1933
1934	if (!cfg || cfg->groups < 32)
1935		groups = 32;
1936	else
1937		groups = cfg->groups;
1938
1939	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1940	if (!listeners)
1941		goto out_sock_release;
1942
1943	sk->sk_data_ready = netlink_data_ready;
1944	if (cfg && cfg->input)
1945		nlk_sk(sk)->netlink_rcv = cfg->input;
1946
1947	if (netlink_insert(sk, 0))
1948		goto out_sock_release;
1949
1950	nlk = nlk_sk(sk);
1951	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
1952
1953	netlink_table_grab();
1954	if (!nl_table[unit].registered) {
1955		nl_table[unit].groups = groups;
1956		rcu_assign_pointer(nl_table[unit].listeners, listeners);
1957		nl_table[unit].cb_mutex = cb_mutex;
1958		nl_table[unit].module = module;
1959		if (cfg) {
1960			nl_table[unit].bind = cfg->bind;
1961			nl_table[unit].unbind = cfg->unbind;
1962			nl_table[unit].flags = cfg->flags;
1963			if (cfg->compare)
1964				nl_table[unit].compare = cfg->compare;
1965		}
1966		nl_table[unit].registered = 1;
1967	} else {
1968		kfree(listeners);
1969		nl_table[unit].registered++;
1970	}
1971	netlink_table_ungrab();
1972	return sk;
1973
1974out_sock_release:
1975	kfree(listeners);
1976	netlink_kernel_release(sk);
1977	return NULL;
1978
1979out_sock_release_nosk:
1980	sock_release(sock);
1981	return NULL;
1982}
1983EXPORT_SYMBOL(__netlink_kernel_create);
1984
1985void
1986netlink_kernel_release(struct sock *sk)
1987{
1988	if (sk == NULL || sk->sk_socket == NULL)
1989		return;
1990
1991	sock_release(sk->sk_socket);
1992}
1993EXPORT_SYMBOL(netlink_kernel_release);
1994
1995int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1996{
1997	struct listeners *new, *old;
1998	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1999
2000	if (groups < 32)
2001		groups = 32;
2002
2003	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2004		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2005		if (!new)
2006			return -ENOMEM;
2007		old = nl_deref_protected(tbl->listeners);
2008		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2009		rcu_assign_pointer(tbl->listeners, new);
2010
2011		kfree_rcu(old, rcu);
2012	}
2013	tbl->groups = groups;
2014
2015	return 0;
2016}
2017
2018/**
2019 * netlink_change_ngroups - change number of multicast groups
2020 *
2021 * This changes the number of multicast groups that are available
2022 * on a certain netlink family. Note that it is not possible to
2023 * change the number of groups to below 32. Also note that it does
2024 * not implicitly call netlink_clear_multicast_users() when the
2025 * number of groups is reduced.
2026 *
2027 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2028 * @groups: The new number of groups.
2029 */
2030int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2031{
2032	int err;
2033
2034	netlink_table_grab();
2035	err = __netlink_change_ngroups(sk, groups);
2036	netlink_table_ungrab();
2037
2038	return err;
2039}
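/*
 * Example (editorial sketch): a family that allocates multicast groups at
 * runtime would grow the listener bitmaps before using them, e.g.
 *
 *	err = netlink_change_ngroups(my_nl_sk, 64);
 *
 * Per the note above, shrinking past groups still in use is the caller's
 * problem: netlink_clear_multicast_users() is not called implicitly.
 */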
2040
2041void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2042{
2043	struct sock *sk;
2044	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2045
2046	sk_for_each_bound(sk, &tbl->mc_list)
2047		netlink_update_socket_mc(nlk_sk(sk), group, 0);
2048}
2049
2050struct nlmsghdr *
2051__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2052{
2053	struct nlmsghdr *nlh;
2054	int size = nlmsg_msg_size(len);
2055
2056	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2057	nlh->nlmsg_type = type;
2058	nlh->nlmsg_len = size;
2059	nlh->nlmsg_flags = flags;
2060	nlh->nlmsg_pid = portid;
2061	nlh->nlmsg_seq = seq;
2062	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2063		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2064	return nlh;
2065}
2066EXPORT_SYMBOL(__nlmsg_put);
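/*
 * Example (editorial sketch): composing a message with the nlmsg_put()
 * wrapper around __nlmsg_put() and finishing it with nlmsg_end().
 * MY_MSG_TYPE and MY_ATTR_FOO are hypothetical names.
 */
static struct sk_buff *my_nl_build(u32 portid, u32 seq, u32 foo)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
	if (!nlh || nla_put_u32(skb, MY_ATTR_FOO, foo)) {
		nlmsg_free(skb);
		return NULL;
	}

	nlmsg_end(skb, nlh);	/* fix up nlmsg_len to cover the attributes */
	return skb;
}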
2067
2068/*
2069 * It looks a bit ugly.
2070 * It would be better to create a kernel thread.
2071 */
2072
2073static int netlink_dump(struct sock *sk)
2074{
2075	struct netlink_sock *nlk = nlk_sk(sk);
2076	struct netlink_callback *cb;
2077	struct sk_buff *skb = NULL;
2078	struct nlmsghdr *nlh;
2079	struct module *module;
2080	int len, err = -ENOBUFS;
2081	int alloc_min_size;
2082	int alloc_size;
2083
2084	mutex_lock(nlk->cb_mutex);
2085	if (!nlk->cb_running) {
2086		err = -EINVAL;
2087		goto errout_skb;
2088	}
2089
2090	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2091		goto errout_skb;
2092
2093	/* NLMSG_GOODSIZE is small to avoid high order allocations being
2094	 * required, but it makes sense to _attempt_ a 16K allocation to
2095	 * reduce the number of system calls on dump operations, if the
2096	 * user ever provided a big enough buffer.
2097	 */
2098	cb = &nlk->cb;
2099	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2100
2101	if (alloc_min_size < nlk->max_recvmsg_len) {
2102		alloc_size = nlk->max_recvmsg_len;
2103		skb = alloc_skb(alloc_size,
2104				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
2105				__GFP_NOWARN | __GFP_NORETRY);
2106	}
2107	if (!skb) {
2108		alloc_size = alloc_min_size;
2109		skb = alloc_skb(alloc_size, GFP_KERNEL);
2110	}
2111	if (!skb)
2112		goto errout_skb;
2113
2114	/* Trim skb to the allocated size. The user is expected to provide a
2115	 * buffer as large as max(min_dump_alloc, 16KiB) (max_recvmsg_len is
2116	 * capped at netlink_recvmsg()). The dump packs as many smaller
2117	 * messages as fit within the allocated skb. The skb is typically
2118	 * allocated with more space than required (close to 2x the requested
2119	 * size with the align-to-next-power-of-2 approach). If the dump were
2120	 * allowed to use the excess space, it would be hard for a user to
2121	 * size a reasonable static buffer from the expected largest dump of
2122	 * a single netdev, and the outcome would be a MSG_TRUNC error.
2123	 */
2124	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2125	netlink_skb_set_owner_r(skb, sk);
2126
2127	len = cb->dump(skb, cb);
2128
2129	if (len > 0) {
2130		mutex_unlock(nlk->cb_mutex);
2131
2132		if (sk_filter(sk, skb))
2133			kfree_skb(skb);
2134		else
2135			__netlink_sendskb(sk, skb);
2136		return 0;
2137	}
2138
2139	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2140	if (!nlh)
2141		goto errout_skb;
2142
2143	nl_dump_check_consistent(cb, nlh);
2144
2145	memcpy(nlmsg_data(nlh), &len, sizeof(len));
2146
2147	if (sk_filter(sk, skb))
2148		kfree_skb(skb);
2149	else
2150		__netlink_sendskb(sk, skb);
2151
2152	if (cb->done)
2153		cb->done(cb);
2154
2155	nlk->cb_running = false;
2156	module = cb->module;
2157	skb = cb->skb;
2158	mutex_unlock(nlk->cb_mutex);
2159	module_put(module);
2160	consume_skb(skb);
2161	return 0;
2162
2163errout_skb:
2164	mutex_unlock(nlk->cb_mutex);
2165	kfree_skb(skb);
2166	return err;
2167}
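/*
 * Example (editorial sketch): the contract netlink_dump() expects from
 * cb->dump.  A positive return means "skb full, call again"; <= 0 means
 * the dump is complete and NLMSG_DONE gets appended.  my_obj_count() and
 * my_fill_one() are hypothetical; cb->args[] is per-dump scratch space.
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long idx = cb->args[0];

	while (idx < my_obj_count()) {
		if (my_fill_one(skb, idx, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;		/* out of room; resume here next call */
		idx++;
	}
	cb->args[0] = idx;
	return idx < my_obj_count() ? skb->len : 0;
}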
2168
2169int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2170			 const struct nlmsghdr *nlh,
2171			 struct netlink_dump_control *control)
2172{
2173	struct netlink_callback *cb;
2174	struct sock *sk;
2175	struct netlink_sock *nlk;
2176	int ret;
2177
2178	atomic_inc(&skb->users);
2179
2180	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2181	if (sk == NULL) {
2182		ret = -ECONNREFUSED;
2183		goto error_free;
2184	}
2185
2186	nlk = nlk_sk(sk);
2187	mutex_lock(nlk->cb_mutex);
2188	/* A dump is in progress... */
2189	if (nlk->cb_running) {
2190		ret = -EBUSY;
2191		goto error_unlock;
2192	}
2193	/* Take a reference on the module that cb->dump belongs to */
2194	if (!try_module_get(control->module)) {
2195		ret = -EPROTONOSUPPORT;
2196		goto error_unlock;
2197	}
2198
2199	cb = &nlk->cb;
2200	memset(cb, 0, sizeof(*cb));
2201	cb->start = control->start;
2202	cb->dump = control->dump;
2203	cb->done = control->done;
2204	cb->nlh = nlh;
2205	cb->data = control->data;
2206	cb->module = control->module;
2207	cb->min_dump_alloc = control->min_dump_alloc;
2208	cb->skb = skb;
2209
2210	nlk->cb_running = true;
2211
2212	mutex_unlock(nlk->cb_mutex);
2213
2214	if (cb->start)
2215		cb->start(cb);
2216
2217	ret = netlink_dump(sk);
2218	sock_put(sk);
2219
2220	if (ret)
2221		return ret;
2222
2223	/* We successfully started a dump; by returning -EINTR we
2224	 * signal that no ACK should be sent even if one was requested.
2225	 */
2226	return -EINTR;
2227
2228error_unlock:
2229	sock_put(sk);
2230	mutex_unlock(nlk->cb_mutex);
2231error_free:
2232	kfree_skb(skb);
2233	return ret;
2234}
2235EXPORT_SYMBOL(__netlink_dump_start);
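/*
 * Example (editorial sketch): how a request handler typically reaches
 * __netlink_dump_start(), via the netlink_dump_start() inline which fills
 * in control->module.  my_dump() is the callback sketched above; my_nl_sk
 * is the hypothetical kernel socket.
 */
static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,
		};

		/* Success is reported as -EINTR so that no ACK is sent. */
		return netlink_dump_start(my_nl_sk, skb, nlh, &c);
	}

	return -EOPNOTSUPP;
}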
2236
2237void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2238{
2239	struct sk_buff *skb;
2240	struct nlmsghdr *rep;
2241	struct nlmsgerr *errmsg;
2242	size_t payload = sizeof(*errmsg);
2243	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2244
2245	/* Error messages get the original request appended, unless the user
2246	 * requests to cap the error message.
2247	 */
2248	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
2249		payload += nlmsg_len(nlh);
2250
2251	skb = nlmsg_new(payload, GFP_KERNEL);
2252	if (!skb) {
2253		struct sock *sk;
2254
2255		sk = netlink_lookup(sock_net(in_skb->sk),
2256				    in_skb->sk->sk_protocol,
2257				    NETLINK_CB(in_skb).portid);
2258		if (sk) {
2259			sk->sk_err = ENOBUFS;
2260			sk->sk_error_report(sk);
2261			sock_put(sk);
2262		}
2263		return;
2264	}
2265
2266	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2267			  NLMSG_ERROR, payload, 0);
2268	errmsg = nlmsg_data(rep);
2269	errmsg->error = err;
2270	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2271	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2272}
2273EXPORT_SYMBOL(netlink_ack);
2274
2275int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2276						     struct nlmsghdr *))
2277{
2278	struct nlmsghdr *nlh;
2279	int err;
2280
2281	while (skb->len >= nlmsg_total_size(0)) {
2282		int msglen;
2283
2284		nlh = nlmsg_hdr(skb);
2285		err = 0;
2286
2287		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2288			return 0;
2289
2290		/* Only requests are handled by the kernel */
2291		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2292			goto ack;
2293
2294		/* Skip control messages */
2295		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2296			goto ack;
2297
2298		err = cb(skb, nlh);
2299		if (err == -EINTR)
2300			goto skip;
2301
2302ack:
2303		if (nlh->nlmsg_flags & NLM_F_ACK || err)
2304			netlink_ack(skb, nlh, err);
2305
2306skip:
2307		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2308		if (msglen > skb->len)
2309			msglen = skb->len;
2310		skb_pull(skb, msglen);
2311	}
2312
2313	return 0;
2314}
2315EXPORT_SYMBOL(netlink_rcv_skb);
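/*
 * Example (editorial sketch): the glue between the kernel socket's ->input
 * hook and the per-message handler sketched above.  netlink_rcv_skb()
 * walks the messages, calls my_doit() for each request, and acks per the
 * NLM_F_ACK/-EINTR rules.
 */
static void my_nl_rcv_skb(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_doit);
}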
2316
2317/**
2318 * nlmsg_notify - send a notification netlink message
2319 * @sk: netlink socket to use
2320 * @skb: notification message
2321 * @portid: destination netlink portid for reports or 0
2322 * @group: destination multicast group or 0
2323 * @report: 1 to report back, 0 to disable
2324 * @flags: allocation flags
2325 */
2326int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2327		 unsigned int group, int report, gfp_t flags)
2328{
2329	int err = 0;
2330
2331	if (group) {
2332		int exclude_portid = 0;
2333
2334		if (report) {
2335			atomic_inc(&skb->users);
2336			exclude_portid = portid;
2337		}
2338
2339		/* Errors are reported via the destination sk->sk_err; delivery
2340		 * errors are propagated if NETLINK_BROADCAST_ERROR is set. */
2341		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2342	}
2343
2344	if (report) {
2345		int err2;
2346
2347		err2 = nlmsg_unicast(sk, skb, portid);
2348		if (!err || err == -ESRCH)
2349			err = err2;
2350	}
2351
2352	return err;
2353}
2354EXPORT_SYMBOL(nlmsg_notify);
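/*
 * Example (editorial sketch): a typical notification site.  The report
 * flag usually comes from nlmsg_report() on the triggering request, so
 * NLM_F_ECHO requests also get a unicast copy.  MY_GRP_EVENT is a
 * hypothetical multicast group.
 */
static int my_notify(struct sk_buff *skb, struct sk_buff *req_skb,
		     const struct nlmsghdr *nlh)
{
	return nlmsg_notify(my_nl_sk, skb, NETLINK_CB(req_skb).portid,
			    MY_GRP_EVENT, nlmsg_report(nlh), GFP_KERNEL);
}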
2355
2356#ifdef CONFIG_PROC_FS
2357struct nl_seq_iter {
2358	struct seq_net_private p;
2359	struct rhashtable_iter hti;
2360	int link;
2361};
2362
2363static int netlink_walk_start(struct nl_seq_iter *iter)
2364{
2365	int err;
2366
2367	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
2368				   GFP_KERNEL);
2369	if (err) {
2370		iter->link = MAX_LINKS;
2371		return err;
2372	}
2373
2374	err = rhashtable_walk_start(&iter->hti);
2375	return err == -EAGAIN ? 0 : err;
2376}
2377
2378static void netlink_walk_stop(struct nl_seq_iter *iter)
2379{
2380	rhashtable_walk_stop(&iter->hti);
2381	rhashtable_walk_exit(&iter->hti);
2382}
2383
2384static void *__netlink_seq_next(struct seq_file *seq)
2385{
2386	struct nl_seq_iter *iter = seq->private;
2387	struct netlink_sock *nlk;
2388
2389	do {
2390		for (;;) {
2391			int err;
2392
2393			nlk = rhashtable_walk_next(&iter->hti);
2394
2395			if (IS_ERR(nlk)) {
2396				if (PTR_ERR(nlk) == -EAGAIN)
2397					continue;
2398
2399				return nlk;
2400			}
2401
2402			if (nlk)
2403				break;
2404
2405			netlink_walk_stop(iter);
2406			if (++iter->link >= MAX_LINKS)
2407				return NULL;
2408
2409			err = netlink_walk_start(iter);
2410			if (err)
2411				return ERR_PTR(err);
2412		}
2413	} while (sock_net(&nlk->sk) != seq_file_net(seq));
2414
2415	return nlk;
2416}
2417
2418static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
2419{
2420	struct nl_seq_iter *iter = seq->private;
2421	void *obj = SEQ_START_TOKEN;
2422	loff_t pos;
2423	int err;
2424
2425	iter->link = 0;
2426
2427	err = netlink_walk_start(iter);
2428	if (err)
2429		return ERR_PTR(err);
2430
2431	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
2432		obj = __netlink_seq_next(seq);
2433
2434	return obj;
2435}
2436
2437static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2438{
2439	++*pos;
2440	return __netlink_seq_next(seq);
2441}
2442
2443static void netlink_seq_stop(struct seq_file *seq, void *v)
2444{
2445	struct nl_seq_iter *iter = seq->private;
2446
2447	if (iter->link >= MAX_LINKS)
2448		return;
2449
2450	netlink_walk_stop(iter);
2451}
2452
2453
2454static int netlink_seq_show(struct seq_file *seq, void *v)
2455{
2456	if (v == SEQ_START_TOKEN) {
2457		seq_puts(seq,
2458			 "sk       Eth Pid    Groups   "
2459			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2460	} else {
2461		struct sock *s = v;
2462		struct netlink_sock *nlk = nlk_sk(s);
2463
2464		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2465			   s,
2466			   s->sk_protocol,
2467			   nlk->portid,
2468			   nlk->groups ? (u32)nlk->groups[0] : 0,
2469			   sk_rmem_alloc_get(s),
2470			   sk_wmem_alloc_get(s),
2471			   nlk->cb_running,
2472			   atomic_read(&s->sk_refcnt),
2473			   atomic_read(&s->sk_drops),
2474			   sock_i_ino(s)
2475			);
2476
2477	}
2478	return 0;
2479}
2480
2481static const struct seq_operations netlink_seq_ops = {
2482	.start  = netlink_seq_start,
2483	.next   = netlink_seq_next,
2484	.stop   = netlink_seq_stop,
2485	.show   = netlink_seq_show,
2486};
2487
2488
2489static int netlink_seq_open(struct inode *inode, struct file *file)
2490{
2491	return seq_open_net(inode, file, &netlink_seq_ops,
2492				sizeof(struct nl_seq_iter));
2493}
2494
2495static const struct file_operations netlink_seq_fops = {
2496	.owner		= THIS_MODULE,
2497	.open		= netlink_seq_open,
2498	.read		= seq_read,
2499	.llseek		= seq_lseek,
2500	.release	= seq_release_net,
2501};
2502
2503#endif
2504
2505int netlink_register_notifier(struct notifier_block *nb)
2506{
2507	return blocking_notifier_chain_register(&netlink_chain, nb);
2508}
2509EXPORT_SYMBOL(netlink_register_notifier);
2510
2511int netlink_unregister_notifier(struct notifier_block *nb)
2512{
2513	return blocking_notifier_chain_unregister(&netlink_chain, nb);
2514}
2515EXPORT_SYMBOL(netlink_unregister_notifier);
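/*
 * Example (editorial sketch): listening for NETLINK_URELEASE events to
 * clean up per-socket state when a bound socket closes.  The chain passes
 * a struct netlink_notify; my_drop_state() and NETLINK_MY_PROTO are
 * hypothetical names.
 */
static int my_nl_event(struct notifier_block *nb, unsigned long state,
		       void *_notify)
{
	struct netlink_notify *n = _notify;

	if (state == NETLINK_URELEASE && n->protocol == NETLINK_MY_PROTO)
		my_drop_state(n->net, n->portid);

	return NOTIFY_DONE;
}

static struct notifier_block my_nl_notifier = {
	.notifier_call = my_nl_event,
};

/* Registered once at init time with netlink_register_notifier(&my_nl_notifier). */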
2516
2517static const struct proto_ops netlink_ops = {
2518	.family =	PF_NETLINK,
2519	.owner =	THIS_MODULE,
2520	.release =	netlink_release,
2521	.bind =		netlink_bind,
2522	.connect =	netlink_connect,
2523	.socketpair =	sock_no_socketpair,
2524	.accept =	sock_no_accept,
2525	.getname =	netlink_getname,
2526	.poll =		datagram_poll,
2527	.ioctl =	netlink_ioctl,
2528	.listen =	sock_no_listen,
2529	.shutdown =	sock_no_shutdown,
2530	.setsockopt =	netlink_setsockopt,
2531	.getsockopt =	netlink_getsockopt,
2532	.sendmsg =	netlink_sendmsg,
2533	.recvmsg =	netlink_recvmsg,
2534	.mmap =		sock_no_mmap,
2535	.sendpage =	sock_no_sendpage,
2536};
2537
2538static const struct net_proto_family netlink_family_ops = {
2539	.family = PF_NETLINK,
2540	.create = netlink_create,
2541	.owner	= THIS_MODULE,	/* for consistency 8) */
2542};
2543
2544static int __net_init netlink_net_init(struct net *net)
2545{
2546#ifdef CONFIG_PROC_FS
2547	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
2548		return -ENOMEM;
2549#endif
2550	return 0;
2551}
2552
2553static void __net_exit netlink_net_exit(struct net *net)
2554{
2555#ifdef CONFIG_PROC_FS
2556	remove_proc_entry("netlink", net->proc_net);
2557#endif
2558}
2559
2560static void __init netlink_add_usersock_entry(void)
2561{
2562	struct listeners *listeners;
2563	int groups = 32;
2564
2565	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2566	if (!listeners)
2567		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2568
2569	netlink_table_grab();
2570
2571	nl_table[NETLINK_USERSOCK].groups = groups;
2572	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2573	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2574	nl_table[NETLINK_USERSOCK].registered = 1;
2575	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2576
2577	netlink_table_ungrab();
2578}
2579
2580static struct pernet_operations __net_initdata netlink_net_ops = {
2581	.init = netlink_net_init,
2582	.exit = netlink_net_exit,
2583};
2584
2585static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
2586{
2587	const struct netlink_sock *nlk = data;
2588	struct netlink_compare_arg arg;
2589
2590	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
2591	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
2592}
2593
2594static const struct rhashtable_params netlink_rhashtable_params = {
2595	.head_offset = offsetof(struct netlink_sock, node),
2596	.key_len = netlink_compare_arg_len,
2597	.obj_hashfn = netlink_hash,
2598	.obj_cmpfn = netlink_compare,
2599	.automatic_shrinking = true,
2600};
2601
2602static int __init netlink_proto_init(void)
2603{
2604	int i;
2605	int err = proto_register(&netlink_proto, 0);
2606
2607	if (err != 0)
2608		goto out;
2609
2610	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2611
2612	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2613	if (!nl_table)
2614		goto panic;
2615
2616	for (i = 0; i < MAX_LINKS; i++) {
2617		if (rhashtable_init(&nl_table[i].hash,
2618				    &netlink_rhashtable_params) < 0) {
2619				while (i-- > 0)
2620				rhashtable_destroy(&nl_table[i].hash);
2621			kfree(nl_table);
2622			goto panic;
2623		}
2624	}
2625
2626	INIT_LIST_HEAD(&netlink_tap_all);
2627
2628	netlink_add_usersock_entry();
2629
2630	sock_register(&netlink_family_ops);
2631	register_pernet_subsys(&netlink_net_ops);
2632	/* The netlink device handler may be needed early. */
2633	rtnetlink_init();
2634out:
2635	return err;
2636panic:
2637	panic("netlink_init: Cannot allocate nl_table\n");
2638}
2639
2640core_initcall(netlink_proto_init);
v3.15
   1/*
   2 * NETLINK      Kernel-user communication protocol.
   3 *
   4 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
   6 * 				Patrick McHardy <kaber@trash.net>
   7 *
   8 *		This program is free software; you can redistribute it and/or
   9 *		modify it under the terms of the GNU General Public License
  10 *		as published by the Free Software Foundation; either version
  11 *		2 of the License, or (at your option) any later version.
  12 *
  13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
  14 *                               added netlink_proto_exit
  15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
  16 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
  17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
  18 * 				 - inc module use count of module that owns
  19 * 				   the kernel socket in case userspace opens
  20 * 				   socket of same protocol
  21 * 				 - remove all module support, since netlink is
  22 * 				   mandatory if CONFIG_NET=y these days
  23 */
  24
  25#include <linux/module.h>
  26
  27#include <linux/capability.h>
  28#include <linux/kernel.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/sched.h>
  32#include <linux/errno.h>
  33#include <linux/string.h>
  34#include <linux/stat.h>
  35#include <linux/socket.h>
  36#include <linux/un.h>
  37#include <linux/fcntl.h>
  38#include <linux/termios.h>
  39#include <linux/sockios.h>
  40#include <linux/net.h>
  41#include <linux/fs.h>
  42#include <linux/slab.h>
  43#include <asm/uaccess.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/rtnetlink.h>
  47#include <linux/proc_fs.h>
  48#include <linux/seq_file.h>
  49#include <linux/notifier.h>
  50#include <linux/security.h>
  51#include <linux/jhash.h>
  52#include <linux/jiffies.h>
  53#include <linux/random.h>
  54#include <linux/bitops.h>
  55#include <linux/mm.h>
  56#include <linux/types.h>
  57#include <linux/audit.h>
  58#include <linux/mutex.h>
  59#include <linux/vmalloc.h>
  60#include <linux/if_arp.h>
 
  61#include <asm/cacheflush.h>
 
 
  62
  63#include <net/net_namespace.h>
  64#include <net/sock.h>
  65#include <net/scm.h>
  66#include <net/netlink.h>
  67
  68#include "af_netlink.h"
  69
  70struct listeners {
  71	struct rcu_head		rcu;
  72	unsigned long		masks[0];
  73};
  74
  75/* state bits */
  76#define NETLINK_CONGESTED	0x0
  77
  78/* flags */
  79#define NETLINK_KERNEL_SOCKET	0x1
  80#define NETLINK_RECV_PKTINFO	0x2
  81#define NETLINK_BROADCAST_SEND_ERROR	0x4
  82#define NETLINK_RECV_NO_ENOBUFS	0x8
 
 
  83
  84static inline int netlink_is_kernel(struct sock *sk)
  85{
  86	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
  87}
  88
  89struct netlink_table *nl_table;
  90EXPORT_SYMBOL_GPL(nl_table);
  91
  92static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
  93
  94static int netlink_dump(struct sock *sk);
  95static void netlink_skb_destructor(struct sk_buff *skb);
  96
 
 
 
 
 
 
 
 
  97DEFINE_RWLOCK(nl_table_lock);
  98EXPORT_SYMBOL_GPL(nl_table_lock);
  99static atomic_t nl_table_users = ATOMIC_INIT(0);
 100
 101#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
 102
 103static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 104
 105static DEFINE_SPINLOCK(netlink_tap_lock);
 106static struct list_head netlink_tap_all __read_mostly;
 107
 
 
 108static inline u32 netlink_group_mask(u32 group)
 109{
 110	return group ? 1 << (group - 1) : 0;
 111}
 112
 113static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
 
 114{
 115	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 
 
 
 
 
 
 
 
 
 
 
 
 116}
 117
 118int netlink_add_tap(struct netlink_tap *nt)
 119{
 120	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
 121		return -EINVAL;
 122
 123	spin_lock(&netlink_tap_lock);
 124	list_add_rcu(&nt->list, &netlink_tap_all);
 125	spin_unlock(&netlink_tap_lock);
 126
 127	if (nt->module)
 128		__module_get(nt->module);
 129
 130	return 0;
 131}
 132EXPORT_SYMBOL_GPL(netlink_add_tap);
 133
 134static int __netlink_remove_tap(struct netlink_tap *nt)
 135{
 136	bool found = false;
 137	struct netlink_tap *tmp;
 138
 139	spin_lock(&netlink_tap_lock);
 140
 141	list_for_each_entry(tmp, &netlink_tap_all, list) {
 142		if (nt == tmp) {
 143			list_del_rcu(&nt->list);
 144			found = true;
 145			goto out;
 146		}
 147	}
 148
 149	pr_warn("__netlink_remove_tap: %p not found\n", nt);
 150out:
 151	spin_unlock(&netlink_tap_lock);
 152
 153	if (found && nt->module)
 154		module_put(nt->module);
 155
 156	return found ? 0 : -ENODEV;
 157}
 158
 159int netlink_remove_tap(struct netlink_tap *nt)
 160{
 161	int ret;
 162
 163	ret = __netlink_remove_tap(nt);
 164	synchronize_net();
 165
 166	return ret;
 167}
 168EXPORT_SYMBOL_GPL(netlink_remove_tap);
 169
 170static bool netlink_filter_tap(const struct sk_buff *skb)
 171{
 172	struct sock *sk = skb->sk;
 173	bool pass = false;
 174
 175	/* We take the more conservative approach and
 176	 * whitelist socket protocols that may pass.
 177	 */
 178	switch (sk->sk_protocol) {
 179	case NETLINK_ROUTE:
 180	case NETLINK_USERSOCK:
 181	case NETLINK_SOCK_DIAG:
 182	case NETLINK_NFLOG:
 183	case NETLINK_XFRM:
 184	case NETLINK_FIB_LOOKUP:
 185	case NETLINK_NETFILTER:
 186	case NETLINK_GENERIC:
 187		pass = true;
 188		break;
 189	}
 190
 191	return pass;
 192}
 193
 194static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 195				     struct net_device *dev)
 196{
 197	struct sk_buff *nskb;
 198	struct sock *sk = skb->sk;
 199	int ret = -ENOMEM;
 200
 201	dev_hold(dev);
 202	nskb = skb_clone(skb, GFP_ATOMIC);
 
 
 
 
 203	if (nskb) {
 204		nskb->dev = dev;
 205		nskb->protocol = htons((u16) sk->sk_protocol);
 206		nskb->pkt_type = netlink_is_kernel(sk) ?
 207				 PACKET_KERNEL : PACKET_USER;
 208
 209		ret = dev_queue_xmit(nskb);
 210		if (unlikely(ret > 0))
 211			ret = net_xmit_errno(ret);
 212	}
 213
 214	dev_put(dev);
 215	return ret;
 216}
 217
 218static void __netlink_deliver_tap(struct sk_buff *skb)
 219{
 220	int ret;
 221	struct netlink_tap *tmp;
 222
 223	if (!netlink_filter_tap(skb))
 224		return;
 225
 226	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 227		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 228		if (unlikely(ret))
 229			break;
 230	}
 231}
 232
 233static void netlink_deliver_tap(struct sk_buff *skb)
 234{
 235	rcu_read_lock();
 236
 237	if (unlikely(!list_empty(&netlink_tap_all)))
 238		__netlink_deliver_tap(skb);
 239
 240	rcu_read_unlock();
 241}
 242
 243static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
 244				       struct sk_buff *skb)
 245{
 246	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
 247		netlink_deliver_tap(skb);
 248}
 249
 250static void netlink_overrun(struct sock *sk)
 251{
 252	struct netlink_sock *nlk = nlk_sk(sk);
 253
 254	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
 255		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
 
 256			sk->sk_err = ENOBUFS;
 257			sk->sk_error_report(sk);
 258		}
 259	}
 260	atomic_inc(&sk->sk_drops);
 261}
 262
 263static void netlink_rcv_wake(struct sock *sk)
 264{
 265	struct netlink_sock *nlk = nlk_sk(sk);
 266
 267	if (skb_queue_empty(&sk->sk_receive_queue))
 268		clear_bit(NETLINK_CONGESTED, &nlk->state);
 269	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
 270		wake_up_interruptible(&nlk->wait);
 271}
 272
 273#ifdef CONFIG_NETLINK_MMAP
 274static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
 275{
 276	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
 277}
 278
 279static bool netlink_rx_is_mmaped(struct sock *sk)
 280{
 281	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
 282}
 283
 284static bool netlink_tx_is_mmaped(struct sock *sk)
 285{
 286	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
 287}
 288
 289static __pure struct page *pgvec_to_page(const void *addr)
 290{
 291	if (is_vmalloc_addr(addr))
 292		return vmalloc_to_page(addr);
 293	else
 294		return virt_to_page(addr);
 295}
 296
 297static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
 298{
 299	unsigned int i;
 300
 301	for (i = 0; i < len; i++) {
 302		if (pg_vec[i] != NULL) {
 303			if (is_vmalloc_addr(pg_vec[i]))
 304				vfree(pg_vec[i]);
 305			else
 306				free_pages((unsigned long)pg_vec[i], order);
 307		}
 308	}
 309	kfree(pg_vec);
 310}
 311
 312static void *alloc_one_pg_vec_page(unsigned long order)
 313{
 314	void *buffer;
 315	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
 316			  __GFP_NOWARN | __GFP_NORETRY;
 317
 318	buffer = (void *)__get_free_pages(gfp_flags, order);
 319	if (buffer != NULL)
 320		return buffer;
 321
 322	buffer = vzalloc((1 << order) * PAGE_SIZE);
 323	if (buffer != NULL)
 324		return buffer;
 325
 326	gfp_flags &= ~__GFP_NORETRY;
 327	return (void *)__get_free_pages(gfp_flags, order);
 328}
 329
 330static void **alloc_pg_vec(struct netlink_sock *nlk,
 331			   struct nl_mmap_req *req, unsigned int order)
 332{
 333	unsigned int block_nr = req->nm_block_nr;
 334	unsigned int i;
 335	void **pg_vec;
 336
 337	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
 338	if (pg_vec == NULL)
 339		return NULL;
 340
 341	for (i = 0; i < block_nr; i++) {
 342		pg_vec[i] = alloc_one_pg_vec_page(order);
 343		if (pg_vec[i] == NULL)
 344			goto err1;
 345	}
 346
 347	return pg_vec;
 348err1:
 349	free_pg_vec(pg_vec, order, block_nr);
 350	return NULL;
 351}
 352
 353static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
 354			    bool closing, bool tx_ring)
 355{
 356	struct netlink_sock *nlk = nlk_sk(sk);
 357	struct netlink_ring *ring;
 358	struct sk_buff_head *queue;
 359	void **pg_vec = NULL;
 360	unsigned int order = 0;
 361	int err;
 362
 363	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
 364	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
 365
 366	if (!closing) {
 367		if (atomic_read(&nlk->mapped))
 368			return -EBUSY;
 369		if (atomic_read(&ring->pending))
 370			return -EBUSY;
 371	}
 372
 373	if (req->nm_block_nr) {
 374		if (ring->pg_vec != NULL)
 375			return -EBUSY;
 376
 377		if ((int)req->nm_block_size <= 0)
 378			return -EINVAL;
 379		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
 380			return -EINVAL;
 381		if (req->nm_frame_size < NL_MMAP_HDRLEN)
 382			return -EINVAL;
 383		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
 384			return -EINVAL;
 385
 386		ring->frames_per_block = req->nm_block_size /
 387					 req->nm_frame_size;
 388		if (ring->frames_per_block == 0)
 389			return -EINVAL;
 390		if (ring->frames_per_block * req->nm_block_nr !=
 391		    req->nm_frame_nr)
 392			return -EINVAL;
 393
 394		order = get_order(req->nm_block_size);
 395		pg_vec = alloc_pg_vec(nlk, req, order);
 396		if (pg_vec == NULL)
 397			return -ENOMEM;
 398	} else {
 399		if (req->nm_frame_nr)
 400			return -EINVAL;
 401	}
 402
 403	err = -EBUSY;
 404	mutex_lock(&nlk->pg_vec_lock);
 405	if (closing || atomic_read(&nlk->mapped) == 0) {
 406		err = 0;
 407		spin_lock_bh(&queue->lock);
 408
 409		ring->frame_max		= req->nm_frame_nr - 1;
 410		ring->head		= 0;
 411		ring->frame_size	= req->nm_frame_size;
 412		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
 413
 414		swap(ring->pg_vec_len, req->nm_block_nr);
 415		swap(ring->pg_vec_order, order);
 416		swap(ring->pg_vec, pg_vec);
 417
 418		__skb_queue_purge(queue);
 419		spin_unlock_bh(&queue->lock);
 420
 421		WARN_ON(atomic_read(&nlk->mapped));
 422	}
 423	mutex_unlock(&nlk->pg_vec_lock);
 424
 425	if (pg_vec)
 426		free_pg_vec(pg_vec, order, req->nm_block_nr);
 427	return err;
 428}
 429
 430static void netlink_mm_open(struct vm_area_struct *vma)
 431{
 432	struct file *file = vma->vm_file;
 433	struct socket *sock = file->private_data;
 434	struct sock *sk = sock->sk;
 435
 436	if (sk)
 437		atomic_inc(&nlk_sk(sk)->mapped);
 438}
 439
 440static void netlink_mm_close(struct vm_area_struct *vma)
 441{
 442	struct file *file = vma->vm_file;
 443	struct socket *sock = file->private_data;
 444	struct sock *sk = sock->sk;
 445
 446	if (sk)
 447		atomic_dec(&nlk_sk(sk)->mapped);
 448}
 449
 450static const struct vm_operations_struct netlink_mmap_ops = {
 451	.open	= netlink_mm_open,
 452	.close	= netlink_mm_close,
 453};
 454
 455static int netlink_mmap(struct file *file, struct socket *sock,
 456			struct vm_area_struct *vma)
 457{
 458	struct sock *sk = sock->sk;
 459	struct netlink_sock *nlk = nlk_sk(sk);
 460	struct netlink_ring *ring;
 461	unsigned long start, size, expected;
 462	unsigned int i;
 463	int err = -EINVAL;
 464
 465	if (vma->vm_pgoff)
 466		return -EINVAL;
 467
 468	mutex_lock(&nlk->pg_vec_lock);
 469
 470	expected = 0;
 471	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
 472		if (ring->pg_vec == NULL)
 473			continue;
 474		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
 475	}
 476
 477	if (expected == 0)
 478		goto out;
 479
 480	size = vma->vm_end - vma->vm_start;
 481	if (size != expected)
 482		goto out;
 483
 484	start = vma->vm_start;
 485	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
 486		if (ring->pg_vec == NULL)
 487			continue;
 488
 489		for (i = 0; i < ring->pg_vec_len; i++) {
 490			struct page *page;
 491			void *kaddr = ring->pg_vec[i];
 492			unsigned int pg_num;
 493
 494			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
 495				page = pgvec_to_page(kaddr);
 496				err = vm_insert_page(vma, start, page);
 497				if (err < 0)
 498					goto out;
 499				start += PAGE_SIZE;
 500				kaddr += PAGE_SIZE;
 501			}
 502		}
 503	}
 504
 505	atomic_inc(&nlk->mapped);
 506	vma->vm_ops = &netlink_mmap_ops;
 507	err = 0;
 508out:
 509	mutex_unlock(&nlk->pg_vec_lock);
 510	return err;
 511}
 512
 513static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
 514{
 515#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 516	struct page *p_start, *p_end;
 517
 518	/* First page is flushed through netlink_{get,set}_status */
 519	p_start = pgvec_to_page(hdr + PAGE_SIZE);
 520	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
 521	while (p_start <= p_end) {
 522		flush_dcache_page(p_start);
 523		p_start++;
 524	}
 525#endif
 526}
 527
 528static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
 529{
 530	smp_rmb();
 531	flush_dcache_page(pgvec_to_page(hdr));
 532	return hdr->nm_status;
 533}
 534
 535static void netlink_set_status(struct nl_mmap_hdr *hdr,
 536			       enum nl_mmap_status status)
 537{
 538	hdr->nm_status = status;
 539	flush_dcache_page(pgvec_to_page(hdr));
 540	smp_wmb();
 541}
 542
 543static struct nl_mmap_hdr *
 544__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
 545{
 546	unsigned int pg_vec_pos, frame_off;
 547
 548	pg_vec_pos = pos / ring->frames_per_block;
 549	frame_off  = pos % ring->frames_per_block;
 550
 551	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
 552}
 553
 554static struct nl_mmap_hdr *
 555netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
 556		     enum nl_mmap_status status)
 557{
 558	struct nl_mmap_hdr *hdr;
 559
 560	hdr = __netlink_lookup_frame(ring, pos);
 561	if (netlink_get_status(hdr) != status)
 562		return NULL;
 563
 564	return hdr;
 565}
 566
 567static struct nl_mmap_hdr *
 568netlink_current_frame(const struct netlink_ring *ring,
 569		      enum nl_mmap_status status)
 570{
 571	return netlink_lookup_frame(ring, ring->head, status);
 572}
 573
 574static struct nl_mmap_hdr *
 575netlink_previous_frame(const struct netlink_ring *ring,
 576		       enum nl_mmap_status status)
 577{
 578	unsigned int prev;
 579
 580	prev = ring->head ? ring->head - 1 : ring->frame_max;
 581	return netlink_lookup_frame(ring, prev, status);
 582}
 583
 584static void netlink_increment_head(struct netlink_ring *ring)
 585{
 586	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
 587}
 588
 589static void netlink_forward_ring(struct netlink_ring *ring)
 590{
 591	unsigned int head = ring->head, pos = head;
 592	const struct nl_mmap_hdr *hdr;
 593
 594	do {
 595		hdr = __netlink_lookup_frame(ring, pos);
 596		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
 597			break;
 598		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
 599			break;
 600		netlink_increment_head(ring);
 601	} while (ring->head != head);
 602}
 603
 604static bool netlink_dump_space(struct netlink_sock *nlk)
 605{
 606	struct netlink_ring *ring = &nlk->rx_ring;
 607	struct nl_mmap_hdr *hdr;
 608	unsigned int n;
 609
 610	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
 611	if (hdr == NULL)
 612		return false;
 613
 614	n = ring->head + ring->frame_max / 2;
 615	if (n > ring->frame_max)
 616		n -= ring->frame_max;
 617
 618	hdr = __netlink_lookup_frame(ring, n);
 619
 620	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
 621}
 622
 623static unsigned int netlink_poll(struct file *file, struct socket *sock,
 624				 poll_table *wait)
 625{
 626	struct sock *sk = sock->sk;
 627	struct netlink_sock *nlk = nlk_sk(sk);
 628	unsigned int mask;
 629	int err;
 630
 631	if (nlk->rx_ring.pg_vec != NULL) {
 632		/* Memory mapped sockets don't call recvmsg(), so flow control
 633		 * for dumps is performed here. A dump is allowed to continue
 634		 * if at least half the ring is unused.
 635		 */
 636		while (nlk->cb_running && netlink_dump_space(nlk)) {
 637			err = netlink_dump(sk);
 638			if (err < 0) {
 639				sk->sk_err = err;
 640				sk->sk_error_report(sk);
 641				break;
 642			}
 643		}
 644		netlink_rcv_wake(sk);
 645	}
 646
 647	mask = datagram_poll(file, sock, wait);
 648
 649	spin_lock_bh(&sk->sk_receive_queue.lock);
 650	if (nlk->rx_ring.pg_vec) {
 651		netlink_forward_ring(&nlk->rx_ring);
 652		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
 653			mask |= POLLIN | POLLRDNORM;
 654	}
 655	spin_unlock_bh(&sk->sk_receive_queue.lock);
 656
 657	spin_lock_bh(&sk->sk_write_queue.lock);
 658	if (nlk->tx_ring.pg_vec) {
 659		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
 660			mask |= POLLOUT | POLLWRNORM;
 661	}
 662	spin_unlock_bh(&sk->sk_write_queue.lock);
 663
 664	return mask;
 665}
 666
 667static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
 668{
 669	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
 670}
 671
 672static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
 673				   struct netlink_ring *ring,
 674				   struct nl_mmap_hdr *hdr)
 675{
 676	unsigned int size;
 677	void *data;
 678
 679	size = ring->frame_size - NL_MMAP_HDRLEN;
 680	data = (void *)hdr + NL_MMAP_HDRLEN;
 681
 682	skb->head	= data;
 683	skb->data	= data;
 684	skb_reset_tail_pointer(skb);
 685	skb->end	= skb->tail + size;
 686	skb->len	= 0;
 687
 688	skb->destructor	= netlink_skb_destructor;
 689	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
 690	NETLINK_CB(skb).sk = sk;
 691}
 692
 693static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 694				u32 dst_portid, u32 dst_group,
 695				struct sock_iocb *siocb)
 696{
 697	struct netlink_sock *nlk = nlk_sk(sk);
 698	struct netlink_ring *ring;
 699	struct nl_mmap_hdr *hdr;
 700	struct sk_buff *skb;
 701	unsigned int maxlen;
 702	bool excl = true;
 703	int err = 0, len = 0;
 704
 705	/* Netlink messages are validated by the receiver before processing.
 706	 * In order to avoid userspace changing the contents of the message
 707	 * after validation, the socket and the ring may only be used by a
 708	 * single process, otherwise we fall back to copying.
 709	 */
 710	if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
 711	    atomic_read(&nlk->mapped) > 1)
 712		excl = false;
 713
 714	mutex_lock(&nlk->pg_vec_lock);
 715
 716	ring   = &nlk->tx_ring;
 717	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 718
 719	do {
 720		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
 721		if (hdr == NULL) {
 722			if (!(msg->msg_flags & MSG_DONTWAIT) &&
 723			    atomic_read(&nlk->tx_ring.pending))
 724				schedule();
 725			continue;
 726		}
 727		if (hdr->nm_len > maxlen) {
 728			err = -EINVAL;
 729			goto out;
 730		}
 731
 732		netlink_frame_flush_dcache(hdr);
 733
 734		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
 735			skb = alloc_skb_head(GFP_KERNEL);
 736			if (skb == NULL) {
 737				err = -ENOBUFS;
 738				goto out;
 739			}
 740			sock_hold(sk);
 741			netlink_ring_setup_skb(skb, sk, ring, hdr);
 742			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
 743			__skb_put(skb, hdr->nm_len);
 744			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
 745			atomic_inc(&ring->pending);
 746		} else {
 747			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
 748			if (skb == NULL) {
 749				err = -ENOBUFS;
 750				goto out;
 751			}
 752			__skb_put(skb, hdr->nm_len);
 753			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
 754			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 755		}
 756
 757		netlink_increment_head(ring);
 758
 759		NETLINK_CB(skb).portid	  = nlk->portid;
 760		NETLINK_CB(skb).dst_group = dst_group;
 761		NETLINK_CB(skb).creds	  = siocb->scm->creds;
 762
 763		err = security_netlink_send(sk, skb);
 764		if (err) {
 765			kfree_skb(skb);
 766			goto out;
 767		}
 768
 769		if (unlikely(dst_group)) {
 770			atomic_inc(&skb->users);
 771			netlink_broadcast(sk, skb, dst_portid, dst_group,
 772					  GFP_KERNEL);
 773		}
 774		err = netlink_unicast(sk, skb, dst_portid,
 775				      msg->msg_flags & MSG_DONTWAIT);
 776		if (err < 0)
 777			goto out;
 778		len += err;
 779
 780	} while (hdr != NULL ||
 781		 (!(msg->msg_flags & MSG_DONTWAIT) &&
 782		  atomic_read(&nlk->tx_ring.pending)));
 783
 784	if (len > 0)
 785		err = len;
 786out:
 787	mutex_unlock(&nlk->pg_vec_lock);
 788	return err;
 789}
 790
 791static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
 792{
 793	struct nl_mmap_hdr *hdr;
 794
 795	hdr = netlink_mmap_hdr(skb);
 796	hdr->nm_len	= skb->len;
 797	hdr->nm_group	= NETLINK_CB(skb).dst_group;
 798	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 799	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 800	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
 801	netlink_frame_flush_dcache(hdr);
 802	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 803
 804	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
 805	kfree_skb(skb);
 806}
 807
 808static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 809{
 810	struct netlink_sock *nlk = nlk_sk(sk);
 811	struct netlink_ring *ring = &nlk->rx_ring;
 812	struct nl_mmap_hdr *hdr;
 813
 814	spin_lock_bh(&sk->sk_receive_queue.lock);
 815	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
 816	if (hdr == NULL) {
 817		spin_unlock_bh(&sk->sk_receive_queue.lock);
 818		kfree_skb(skb);
 819		netlink_overrun(sk);
 820		return;
 821	}
 822	netlink_increment_head(ring);
 823	__skb_queue_tail(&sk->sk_receive_queue, skb);
 824	spin_unlock_bh(&sk->sk_receive_queue.lock);
 825
 826	hdr->nm_len	= skb->len;
 827	hdr->nm_group	= NETLINK_CB(skb).dst_group;
 828	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 829	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 830	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
 831	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
 832}
 833
 834#else /* CONFIG_NETLINK_MMAP */
 835#define netlink_skb_is_mmaped(skb)	false
 836#define netlink_rx_is_mmaped(sk)	false
 837#define netlink_tx_is_mmaped(sk)	false
 838#define netlink_mmap			sock_no_mmap
 839#define netlink_poll			datagram_poll
 840#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
 841#endif /* CONFIG_NETLINK_MMAP */
 842
 843static void netlink_skb_destructor(struct sk_buff *skb)
 844{
 845#ifdef CONFIG_NETLINK_MMAP
 846	struct nl_mmap_hdr *hdr;
 847	struct netlink_ring *ring;
 848	struct sock *sk;
 849
 850	/* If a packet from the kernel to userspace was freed because of an
 851	 * error without being delivered to userspace, the kernel must reset
 852	 * the status. In the direction userspace to kernel, the status is
 853	 * always reset here after the packet was processed and freed.
 854	 */
 855	if (netlink_skb_is_mmaped(skb)) {
 856		hdr = netlink_mmap_hdr(skb);
 857		sk = NETLINK_CB(skb).sk;
 858
 859		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
 860			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 861			ring = &nlk_sk(sk)->tx_ring;
 862		} else {
 863			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
 864				hdr->nm_len = 0;
 865				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 866			}
 867			ring = &nlk_sk(sk)->rx_ring;
 868		}
 869
 870		WARN_ON(atomic_read(&ring->pending) == 0);
 871		atomic_dec(&ring->pending);
 872		sock_put(sk);
 873
 874		skb->head = NULL;
 875	}
 876#endif
 877	if (is_vmalloc_addr(skb->head)) {
 878		if (!skb->cloned ||
 879		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
 880			vfree(skb->head);
 881
 882		skb->head = NULL;
 883	}
 884	if (skb->sk != NULL)
 885		sock_rfree(skb);
 886}
 887
 888static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 889{
 890	WARN_ON(skb->sk != NULL);
 891	skb->sk = sk;
 892	skb->destructor = netlink_skb_destructor;
 893	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 894	sk_mem_charge(sk, skb->truesize);
 895}
 896
 897static void netlink_sock_destruct(struct sock *sk)
 898{
 899	struct netlink_sock *nlk = nlk_sk(sk);
 900
 901	if (nlk->cb_running) {
 902		if (nlk->cb.done)
 903			nlk->cb.done(&nlk->cb);
 904
 905		module_put(nlk->cb.module);
 906		kfree_skb(nlk->cb.skb);
 907	}
 908
 909	skb_queue_purge(&sk->sk_receive_queue);
 910#ifdef CONFIG_NETLINK_MMAP
 911	if (1) {
 912		struct nl_mmap_req req;
 913
 914		memset(&req, 0, sizeof(req));
 915		if (nlk->rx_ring.pg_vec)
 916			netlink_set_ring(sk, &req, true, false);
 917		memset(&req, 0, sizeof(req));
 918		if (nlk->tx_ring.pg_vec)
 919			netlink_set_ring(sk, &req, true, true);
 920	}
 921#endif /* CONFIG_NETLINK_MMAP */
 922
 923	if (!sock_flag(sk, SOCK_DEAD)) {
 924		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
 925		return;
 926	}
 927
 928	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
 929	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 930	WARN_ON(nlk_sk(sk)->groups);
 931}
 932
 
 
 
 
 
 
 
 
 933/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 934 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 935 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 936 * this, _but_ remember, it adds useless work on UP machines.
 937 */
 938
 939void netlink_table_grab(void)
 940	__acquires(nl_table_lock)
 941{
 942	might_sleep();
 943
 944	write_lock_irq(&nl_table_lock);
 945
 946	if (atomic_read(&nl_table_users)) {
 947		DECLARE_WAITQUEUE(wait, current);
 948
 949		add_wait_queue_exclusive(&nl_table_wait, &wait);
 950		for (;;) {
 951			set_current_state(TASK_UNINTERRUPTIBLE);
 952			if (atomic_read(&nl_table_users) == 0)
 953				break;
 954			write_unlock_irq(&nl_table_lock);
 955			schedule();
 956			write_lock_irq(&nl_table_lock);
 957		}
 958
 959		__set_current_state(TASK_RUNNING);
 960		remove_wait_queue(&nl_table_wait, &wait);
 961	}
 962}
 963
 964void netlink_table_ungrab(void)
 965	__releases(nl_table_lock)
 966{
 967	write_unlock_irq(&nl_table_lock);
 968	wake_up(&nl_table_wait);
 969}
 970
 971static inline void
 972netlink_lock_table(void)
 973{
 974	/* read_lock() synchronizes us to netlink_table_grab */
 975
 976	read_lock(&nl_table_lock);
 977	atomic_inc(&nl_table_users);
 978	read_unlock(&nl_table_lock);
 979}
 980
 981static inline void
 982netlink_unlock_table(void)
 983{
 984	if (atomic_dec_and_test(&nl_table_users))
 985		wake_up(&nl_table_wait);
 986}
 987
 988static bool netlink_compare(struct net *net, struct sock *sk)
 989{
 990	return net_eq(sock_net(sk), net);
 991}
 
 
 
 
 
 992
 993static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 
 994{
 995	struct netlink_table *table = &nl_table[protocol];
 996	struct nl_portid_hash *hash = &table->hash;
 997	struct hlist_head *head;
 998	struct sock *sk;
 999
1000	read_lock(&nl_table_lock);
1001	head = nl_portid_hashfn(hash, portid);
1002	sk_for_each(sk, head) {
1003		if (table->compare(net, sk) &&
1004		    (nlk_sk(sk)->portid == portid)) {
1005			sock_hold(sk);
1006			goto found;
1007		}
1008	}
1009	sk = NULL;
1010found:
1011	read_unlock(&nl_table_lock);
1012	return sk;
1013}
1014
1015static struct hlist_head *nl_portid_hash_zalloc(size_t size)
 
1016{
1017	if (size <= PAGE_SIZE)
1018		return kzalloc(size, GFP_ATOMIC);
1019	else
1020		return (struct hlist_head *)
1021			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
1022					 get_order(size));
1023}
1024
1025static void nl_portid_hash_free(struct hlist_head *table, size_t size)
 
1026{
1027	if (size <= PAGE_SIZE)
1028		kfree(table);
1029	else
1030		free_pages((unsigned long)table, get_order(size));
 
1031}
1032
1033static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
1034{
1035	unsigned int omask, mask, shift;
1036	size_t osize, size;
1037	struct hlist_head *otable, *table;
1038	int i;
1039
1040	omask = mask = hash->mask;
1041	osize = size = (mask + 1) * sizeof(*table);
1042	shift = hash->shift;
1043
1044	if (grow) {
1045		if (++shift > hash->max_shift)
1046			return 0;
1047		mask = mask * 2 + 1;
1048		size *= 2;
1049	}
1050
1051	table = nl_portid_hash_zalloc(size);
1052	if (!table)
1053		return 0;
1054
1055	otable = hash->table;
1056	hash->table = table;
1057	hash->mask = mask;
1058	hash->shift = shift;
1059	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
1060
1061	for (i = 0; i <= omask; i++) {
1062		struct sock *sk;
1063		struct hlist_node *tmp;
1064
1065		sk_for_each_safe(sk, tmp, &otable[i])
1066			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
1067	}
1068
1069	nl_portid_hash_free(otable, osize);
1070	hash->rehash_time = jiffies + 10 * 60 * HZ;
1071	return 1;
1072}
1073
1074static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
1075{
1076	int avg = hash->entries >> hash->shift;
 
1077
1078	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
1079		return 1;
 
 
 
1080
1081	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
1082		nl_portid_hash_rehash(hash, 0);
1083		return 1;
1084	}
1085
1086	return 0;
1087}
1088
1089static const struct proto_ops netlink_ops;
1090
1091static void
1092netlink_update_listeners(struct sock *sk)
1093{
1094	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1095	unsigned long mask;
1096	unsigned int i;
1097	struct listeners *listeners;
1098
1099	listeners = nl_deref_protected(tbl->listeners);
1100	if (!listeners)
1101		return;
1102
1103	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
1104		mask = 0;
1105		sk_for_each_bound(sk, &tbl->mc_list) {
1106			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1107				mask |= nlk_sk(sk)->groups[i];
1108		}
1109		listeners->masks[i] = mask;
1110	}
1111	/* this function is only called with the netlink table "grabbed", which
1112	 * makes sure updates are visible before bind or setsockopt return. */
1113}
1114
1115static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
1116{
1117	struct netlink_table *table = &nl_table[sk->sk_protocol];
1118	struct nl_portid_hash *hash = &table->hash;
1119	struct hlist_head *head;
1120	int err = -EADDRINUSE;
1121	struct sock *osk;
1122	int len;
1123
1124	netlink_table_grab();
1125	head = nl_portid_hashfn(hash, portid);
1126	len = 0;
1127	sk_for_each(osk, head) {
1128		if (table->compare(net, osk) &&
1129		    (nlk_sk(osk)->portid == portid))
1130			break;
1131		len++;
1132	}
1133	if (osk)
1134		goto err;
1135
1136	err = -EBUSY;
1137	if (nlk_sk(sk)->portid)
1138		goto err;
1139
1140	err = -ENOMEM;
1141	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
 
1142		goto err;
1143
1144	if (len && nl_portid_hash_dilute(hash, len))
1145		head = nl_portid_hashfn(hash, portid);
1146	hash->entries++;
1147	nlk_sk(sk)->portid = portid;
1148	sk_add_node(sk, head);
1149	err = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1150
1151err:
1152	netlink_table_ungrab();
1153	return err;
1154}
1155
1156static void netlink_remove(struct sock *sk)
1157{
 
 
 
 
 
 
 
 
 
1158	netlink_table_grab();
1159	if (sk_del_node_init(sk))
1160		nl_table[sk->sk_protocol].hash.entries--;
1161	if (nlk_sk(sk)->subscriptions)
1162		__sk_del_bind_node(sk);
 
 
 
 
1163	netlink_table_ungrab();
1164}
1165
1166static struct proto netlink_proto = {
1167	.name	  = "NETLINK",
1168	.owner	  = THIS_MODULE,
1169	.obj_size = sizeof(struct netlink_sock),
1170};
1171
1172static int __netlink_create(struct net *net, struct socket *sock,
1173			    struct mutex *cb_mutex, int protocol)
 
1174{
1175	struct sock *sk;
1176	struct netlink_sock *nlk;
1177
1178	sock->ops = &netlink_ops;
1179
1180	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
1181	if (!sk)
1182		return -ENOMEM;
1183
1184	sock_init_data(sock, sk);
1185
1186	nlk = nlk_sk(sk);
1187	if (cb_mutex) {
1188		nlk->cb_mutex = cb_mutex;
1189	} else {
1190		nlk->cb_mutex = &nlk->cb_def_mutex;
1191		mutex_init(nlk->cb_mutex);
1192	}
1193	init_waitqueue_head(&nlk->wait);
1194#ifdef CONFIG_NETLINK_MMAP
1195	mutex_init(&nlk->pg_vec_lock);
1196#endif
1197
1198	sk->sk_destruct = netlink_sock_destruct;
1199	sk->sk_protocol = protocol;
1200	return 0;
1201}
1202
1203static int netlink_create(struct net *net, struct socket *sock, int protocol,
1204			  int kern)
1205{
1206	struct module *module = NULL;
1207	struct mutex *cb_mutex;
1208	struct netlink_sock *nlk;
1209	void (*bind)(int group);
 
1210	int err = 0;
1211
1212	sock->state = SS_UNCONNECTED;
1213
1214	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1215		return -ESOCKTNOSUPPORT;
1216
1217	if (protocol < 0 || protocol >= MAX_LINKS)
1218		return -EPROTONOSUPPORT;
1219
1220	netlink_lock_table();
1221#ifdef CONFIG_MODULES
1222	if (!nl_table[protocol].registered) {
1223		netlink_unlock_table();
1224		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
1225		netlink_lock_table();
1226	}
1227#endif
1228	if (nl_table[protocol].registered &&
1229	    try_module_get(nl_table[protocol].module))
1230		module = nl_table[protocol].module;
1231	else
1232		err = -EPROTONOSUPPORT;
1233	cb_mutex = nl_table[protocol].cb_mutex;
1234	bind = nl_table[protocol].bind;
 
1235	netlink_unlock_table();
1236
1237	if (err < 0)
1238		goto out;
1239
1240	err = __netlink_create(net, sock, cb_mutex, protocol);
1241	if (err < 0)
1242		goto out_module;
1243
1244	local_bh_disable();
1245	sock_prot_inuse_add(net, &netlink_proto, 1);
1246	local_bh_enable();
1247
1248	nlk = nlk_sk(sock->sk);
1249	nlk->module = module;
1250	nlk->netlink_bind = bind;
 
1251out:
1252	return err;
1253
1254out_module:
1255	module_put(module);
1256	goto out;
1257}
1258
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1259static int netlink_release(struct socket *sock)
1260{
1261	struct sock *sk = sock->sk;
1262	struct netlink_sock *nlk;
1263
1264	if (!sk)
1265		return 0;
1266
1267	netlink_remove(sk);
1268	sock_orphan(sk);
1269	nlk = nlk_sk(sk);
1270
1271	/*
1272	 * OK. Socket is unlinked, any packets that arrive now
1273	 * will be purged.
1274	 */
1275
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1276	sock->sk = NULL;
1277	wake_up_interruptible_all(&nlk->wait);
1278
1279	skb_queue_purge(&sk->sk_write_queue);
1280
1281	if (nlk->portid) {
1282		struct netlink_notify n = {
1283						.net = sock_net(sk),
1284						.protocol = sk->sk_protocol,
1285						.portid = nlk->portid,
1286					  };
1287		atomic_notifier_call_chain(&netlink_chain,
1288				NETLINK_URELEASE, &n);
1289	}
1290
1291	module_put(nlk->module);
1292
1293	netlink_table_grab();
1294	if (netlink_is_kernel(sk)) {
 
1295		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1296		if (--nl_table[sk->sk_protocol].registered == 0) {
1297			struct listeners *old;
1298
1299			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1300			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1301			kfree_rcu(old, rcu);
1302			nl_table[sk->sk_protocol].module = NULL;
1303			nl_table[sk->sk_protocol].bind = NULL;
 
1304			nl_table[sk->sk_protocol].flags = 0;
1305			nl_table[sk->sk_protocol].registered = 0;
1306		}
1307	} else if (nlk->subscriptions) {
1308		netlink_update_listeners(sk);
1309	}
1310	netlink_table_ungrab();
1311
1312	kfree(nlk->groups);
1313	nlk->groups = NULL;
1314
1315	local_bh_disable();
1316	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
1317	local_bh_enable();
1318	sock_put(sk);
1319	return 0;
1320}
1321
1322static int netlink_autobind(struct socket *sock)
1323{
1324	struct sock *sk = sock->sk;
1325	struct net *net = sock_net(sk);
1326	struct netlink_table *table = &nl_table[sk->sk_protocol];
1327	struct nl_portid_hash *hash = &table->hash;
1328	struct hlist_head *head;
1329	struct sock *osk;
1330	s32 portid = task_tgid_vnr(current);
1331	int err;
1332	static s32 rover = -4097;
 
1333
1334retry:
1335	cond_resched();
1336	netlink_table_grab();
1337	head = nl_portid_hashfn(hash, portid);
1338	sk_for_each(osk, head) {
1339		if (!table->compare(net, osk))
1340			continue;
1341		if (nlk_sk(osk)->portid == portid) {
1342			/* Bind collision, search negative portid values. */
1343			portid = rover--;
1344			if (rover > -4097)
1345				rover = -4097;
1346			netlink_table_ungrab();
1347			goto retry;
1348		}
1349	}
1350	netlink_table_ungrab();
1351
1352	err = netlink_insert(sk, net, portid);
1353	if (err == -EADDRINUSE)
1354		goto retry;
1355
1356	/* If 2 threads race to autobind, that is fine.  */
1357	if (err == -EBUSY)
1358		err = 0;
1359
1360	return err;
1361}
1362
1363/**
1364 * __netlink_ns_capable - General netlink message capability test
1365 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1366 * @user_ns: The user namespace of the capability to use
1367 * @cap: The capability to use
1368 *
1369 * Test to see if the opener of the socket we received the message
1370 * from had when the netlink socket was created and the sender of the
1371 * message has has the capability @cap in the user namespace @user_ns.
1372 */
1373bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1374			struct user_namespace *user_ns, int cap)
1375{
1376	return ((nsp->flags & NETLINK_SKB_DST) ||
1377		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1378		ns_capable(user_ns, cap);
1379}
1380EXPORT_SYMBOL(__netlink_ns_capable);
1381
1382/**
1383 * netlink_ns_capable - General netlink message capability test
1384 * @skb: socket buffer holding a netlink command from userspace
1385 * @user_ns: The user namespace of the capability to use
1386 * @cap: The capability to use
1387 *
1388 * Test to see if the opener of the socket we received the message
1389 * from had when the netlink socket was created and the sender of the
1390 * message has has the capability @cap in the user namespace @user_ns.
1391 */
1392bool netlink_ns_capable(const struct sk_buff *skb,
1393			struct user_namespace *user_ns, int cap)
1394{
1395	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1396}
1397EXPORT_SYMBOL(netlink_ns_capable);
1398
1399/**
1400 * netlink_capable - Netlink global message capability test
1401 * @skb: socket buffer holding a netlink command from userspace
1402 * @cap: The capability to use
1403 *
1404 * Test to see if the opener of the socket we received the message
1405 * from had the capability @cap in the initial user namespace when
1406 * the netlink socket was created, and that the sender still has it.
1407 */
1408bool netlink_capable(const struct sk_buff *skb, int cap)
1409{
1410	return netlink_ns_capable(skb, &init_user_ns, cap);
1411}
1412EXPORT_SYMBOL(netlink_capable);
1413
1414/**
1415 * netlink_net_capable - Netlink network namespace message capability test
1416 * @skb: socket buffer holding a netlink command from userspace
1417 * @cap: The capability to use
1418 *
1419 * Test to see if the opener of the socket we received the message
1420 * from had the capability @cap over the network namespace of the
1421 * socket when the netlink socket was created, and that the sender
1422 * of the message still has it.
1423 */
1424bool netlink_net_capable(const struct sk_buff *skb, int cap)
1425{
1426	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1427}
1428EXPORT_SYMBOL(netlink_net_capable);
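
/*
 * Example (editor's sketch, not part of the original source): a message
 * handler typically gates a privileged command like this, where my_doit()
 * and do_privileged_op() are hypothetical:
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return do_privileged_op(nlh);
 *	}
 */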
1429
1430static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
1431{
1432	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
1433		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
1434}
1435
1436static void
1437netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1438{
1439	struct netlink_sock *nlk = nlk_sk(sk);
1440
1441	if (nlk->subscriptions && !subscriptions)
1442		__sk_del_bind_node(sk);
1443	else if (!nlk->subscriptions && subscriptions)
1444		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1445	nlk->subscriptions = subscriptions;
1446}
1447
1448static int netlink_realloc_groups(struct sock *sk)
1449{
1450	struct netlink_sock *nlk = nlk_sk(sk);
1451	unsigned int groups;
1452	unsigned long *new_groups;
1453	int err = 0;
1454
1455	netlink_table_grab();
1456
1457	groups = nl_table[sk->sk_protocol].groups;
1458	if (!nl_table[sk->sk_protocol].registered) {
1459		err = -ENOENT;
1460		goto out_unlock;
1461	}
1462
1463	if (nlk->ngroups >= groups)
1464		goto out_unlock;
1465
1466	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1467	if (new_groups == NULL) {
1468		err = -ENOMEM;
1469		goto out_unlock;
1470	}
1471	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1472	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1473
1474	nlk->groups = new_groups;
1475	nlk->ngroups = groups;
1476 out_unlock:
1477	netlink_table_ungrab();
1478	return err;
1479}
1480
1481static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1482			int addr_len)
1483{
1484	struct sock *sk = sock->sk;
1485	struct net *net = sock_net(sk);
1486	struct netlink_sock *nlk = nlk_sk(sk);
1487	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1488	int err;
1489
1490	if (addr_len < sizeof(struct sockaddr_nl))
1491		return -EINVAL;
1492
1493	if (nladdr->nl_family != AF_NETLINK)
1494		return -EINVAL;
1495
1496	/* Only the superuser is allowed to listen to multicasts */
1497	if (nladdr->nl_groups) {
1498		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1499			return -EPERM;
1500		err = netlink_realloc_groups(sk);
1501		if (err)
1502			return err;
1503	}
1504
1505	if (nlk->portid) {
1506		if (nladdr->nl_pid != nlk->portid)
1507			return -EINVAL;
1508	} else {
1509		err = nladdr->nl_pid ?
1510			netlink_insert(sk, net, nladdr->nl_pid) :
1511			netlink_autobind(sock);
1512		if (err)
1513			return err;
1514	}
1515
1516	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1517		return 0;
1518
1519	netlink_table_grab();
1520	netlink_update_subscriptions(sk, nlk->subscriptions +
1521					 hweight32(nladdr->nl_groups) -
1522					 hweight32(nlk->groups[0]));
1523	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
1524	netlink_update_listeners(sk);
1525	netlink_table_ungrab();
1526
1527	if (nlk->netlink_bind && nlk->groups[0]) {
1528		int i;
1529
1530		for (i = 0; i < nlk->ngroups; i++) {
1531			if (test_bit(i, nlk->groups))
1532				nlk->netlink_bind(i);
1533		}
1534	}
1535
1536	return 0;
1537}
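
/*
 * For reference, the userspace counterpart of netlink_bind() looks
 * roughly like this (illustrative sketch only; error handling omitted):
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,		// 0 => kernel picks a port (autobind)
 *		.nl_groups = 1,		// subscribe to multicast group 1
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */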
1538
1539static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1540			   int alen, int flags)
1541{
1542	int err = 0;
1543	struct sock *sk = sock->sk;
1544	struct netlink_sock *nlk = nlk_sk(sk);
1545	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1546
1547	if (alen < sizeof(addr->sa_family))
1548		return -EINVAL;
1549
1550	if (addr->sa_family == AF_UNSPEC) {
1551		sk->sk_state	= NETLINK_UNCONNECTED;
1552		nlk->dst_portid	= 0;
1553		nlk->dst_group  = 0;
1554		return 0;
1555	}
1556	if (addr->sa_family != AF_NETLINK)
1557		return -EINVAL;
1558
1559	if ((nladdr->nl_groups || nladdr->nl_pid) &&
1560	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1561		return -EPERM;
1562
1563	if (!nlk->portid)
1564		err = netlink_autobind(sock);
1565
1566	if (err == 0) {
1567		sk->sk_state	= NETLINK_CONNECTED;
1568		nlk->dst_portid = nladdr->nl_pid;
1569		nlk->dst_group  = ffs(nladdr->nl_groups);
1570	}
1571
1572	return err;
1573}
1574
1575static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1576			   int *addr_len, int peer)
1577{
1578	struct sock *sk = sock->sk;
1579	struct netlink_sock *nlk = nlk_sk(sk);
1580	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1581
1582	nladdr->nl_family = AF_NETLINK;
1583	nladdr->nl_pad = 0;
1584	*addr_len = sizeof(*nladdr);
1585
1586	if (peer) {
1587		nladdr->nl_pid = nlk->dst_portid;
1588		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1589	} else {
1590		nladdr->nl_pid = nlk->portid;
1591		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1592	}
1593	return 0;
1594}
1595
1596static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1597{
1598	struct sock *sock;
1599	struct netlink_sock *nlk;
1600
1601	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1602	if (!sock)
1603		return ERR_PTR(-ECONNREFUSED);
1604
1605	/* Don't bother queuing skb if kernel socket has no input function */
1606	nlk = nlk_sk(sock);
1607	if (sock->sk_state == NETLINK_CONNECTED &&
1608	    nlk->dst_portid != nlk_sk(ssk)->portid) {
1609		sock_put(sock);
1610		return ERR_PTR(-ECONNREFUSED);
1611	}
1612	return sock;
1613}
1614
1615struct sock *netlink_getsockbyfilp(struct file *filp)
1616{
1617	struct inode *inode = file_inode(filp);
1618	struct sock *sock;
1619
1620	if (!S_ISSOCK(inode->i_mode))
1621		return ERR_PTR(-ENOTSOCK);
1622
1623	sock = SOCKET_I(inode)->sk;
1624	if (sock->sk_family != AF_NETLINK)
1625		return ERR_PTR(-EINVAL);
1626
1627	sock_hold(sock);
1628	return sock;
1629}
1630
1631static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1632					       int broadcast)
1633{
1634	struct sk_buff *skb;
1635	void *data;
1636
1637	if (size <= NLMSG_GOODSIZE || broadcast)
1638		return alloc_skb(size, GFP_KERNEL);
1639
1640	size = SKB_DATA_ALIGN(size) +
1641	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1642
1643	data = vmalloc(size);
1644	if (data == NULL)
1645		return NULL;
1646
1647	skb = build_skb(data, size);
1648	if (skb == NULL)
1649		vfree(data);
1650	else {
1651		skb->head_frag = 0;
1652		skb->destructor = netlink_skb_destructor;
1653	}
1654
1655	return skb;
1656}
1657
1658/*
1659 * Attach a skb to a netlink socket.
1660 * The caller must hold a reference to the destination socket. On error, the
1661 * reference is dropped. The skb is not sent to the destination; only the
1662 * error checks are performed and memory in the queue is reserved.
1663 * Return values:
1664 * < 0: error. skb freed, reference to sock dropped.
1665 * 0: continue
1666 * 1: repeat lookup - reference dropped while waiting for socket memory.
1667 */
1668int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1669		      long *timeo, struct sock *ssk)
1670{
1671	struct netlink_sock *nlk;
1672
1673	nlk = nlk_sk(sk);
1674
1675	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1676	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1677	    !netlink_skb_is_mmaped(skb)) {
1678		DECLARE_WAITQUEUE(wait, current);
1679		if (!*timeo) {
1680			if (!ssk || netlink_is_kernel(ssk))
1681				netlink_overrun(sk);
1682			sock_put(sk);
1683			kfree_skb(skb);
1684			return -EAGAIN;
1685		}
1686
1687		__set_current_state(TASK_INTERRUPTIBLE);
1688		add_wait_queue(&nlk->wait, &wait);
1689
1690		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1691		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1692		    !sock_flag(sk, SOCK_DEAD))
1693			*timeo = schedule_timeout(*timeo);
1694
1695		__set_current_state(TASK_RUNNING);
1696		remove_wait_queue(&nlk->wait, &wait);
1697		sock_put(sk);
1698
1699		if (signal_pending(current)) {
1700			kfree_skb(skb);
1701			return sock_intr_errno(*timeo);
1702		}
1703		return 1;
1704	}
1705	netlink_skb_set_owner_r(skb, sk);
1706	return 0;
1707}
1708
1709static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1710{
1711	int len = skb->len;
1712
1713	netlink_deliver_tap(skb);
1714
1715#ifdef CONFIG_NETLINK_MMAP
1716	if (netlink_skb_is_mmaped(skb))
1717		netlink_queue_mmaped_skb(sk, skb);
1718	else if (netlink_rx_is_mmaped(sk))
1719		netlink_ring_set_copied(sk, skb);
1720	else
1721#endif /* CONFIG_NETLINK_MMAP */
1722		skb_queue_tail(&sk->sk_receive_queue, skb);
1723	sk->sk_data_ready(sk);
1724	return len;
1725}
1726
1727int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1728{
1729	int len = __netlink_sendskb(sk, skb);
1730
1731	sock_put(sk);
1732	return len;
1733}
1734
1735void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1736{
1737	kfree_skb(skb);
1738	sock_put(sk);
1739}
1740
1741static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1742{
1743	int delta;
1744
1745	WARN_ON(skb->sk != NULL);
1746	if (netlink_skb_is_mmaped(skb))
1747		return skb;
1748
1749	delta = skb->end - skb->tail;
1750	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1751		return skb;
1752
1753	if (skb_shared(skb)) {
1754		struct sk_buff *nskb = skb_clone(skb, allocation);
1755		if (!nskb)
1756			return skb;
1757		consume_skb(skb);
1758		skb = nskb;
1759	}
1760
1761	if (!pskb_expand_head(skb, 0, -delta, allocation))
1762		skb->truesize -= delta;
1763
1764	return skb;
1765}
1766
1767static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1768				  struct sock *ssk)
1769{
1770	int ret;
1771	struct netlink_sock *nlk = nlk_sk(sk);
1772
1773	ret = -ECONNREFUSED;
1774	if (nlk->netlink_rcv != NULL) {
1775		ret = skb->len;
1776		netlink_skb_set_owner_r(skb, sk);
1777		NETLINK_CB(skb).sk = ssk;
1778		netlink_deliver_tap_kernel(sk, ssk, skb);
1779		nlk->netlink_rcv(skb);
1780		consume_skb(skb);
1781	} else {
1782		kfree_skb(skb);
1783	}
1784	sock_put(sk);
1785	return ret;
1786}
1787
1788int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1789		    u32 portid, int nonblock)
1790{
1791	struct sock *sk;
1792	int err;
1793	long timeo;
1794
1795	skb = netlink_trim(skb, gfp_any());
1796
1797	timeo = sock_sndtimeo(ssk, nonblock);
1798retry:
1799	sk = netlink_getsockbyportid(ssk, portid);
1800	if (IS_ERR(sk)) {
1801		kfree_skb(skb);
1802		return PTR_ERR(sk);
1803	}
1804	if (netlink_is_kernel(sk))
1805		return netlink_unicast_kernel(sk, skb, ssk);
1806
1807	if (sk_filter(sk, skb)) {
1808		err = skb->len;
1809		kfree_skb(skb);
1810		sock_put(sk);
1811		return err;
1812	}
1813
1814	err = netlink_attachskb(sk, skb, &timeo, ssk);
1815	if (err == 1)
1816		goto retry;
1817	if (err)
1818		return err;
1819
1820	return netlink_sendskb(sk, skb);
1821}
1822EXPORT_SYMBOL(netlink_unicast);
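
/*
 * Example (editor's sketch): a subsystem answering a request allocates a
 * reply, fills it in and hands it off with netlink_unicast(), which
 * consumes the skb on both success and failure (kernel_sk and req_skb
 * are placeholders):
 *
 *	skb = nlmsg_new(payload_size, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	// ... build the message ...
 *	return netlink_unicast(kernel_sk, skb, NETLINK_CB(req_skb).portid,
 *			       MSG_DONTWAIT);
 */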
1823
1824struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1825				  u32 dst_portid, gfp_t gfp_mask)
1826{
1827#ifdef CONFIG_NETLINK_MMAP
1828	struct sock *sk = NULL;
1829	struct sk_buff *skb;
1830	struct netlink_ring *ring;
1831	struct nl_mmap_hdr *hdr;
1832	unsigned int maxlen;
1833
1834	sk = netlink_getsockbyportid(ssk, dst_portid);
1835	if (IS_ERR(sk))
1836		goto out;
1837
1838	ring = &nlk_sk(sk)->rx_ring;
1839	/* fast-path without atomic ops for common case: non-mmaped receiver */
1840	if (ring->pg_vec == NULL)
1841		goto out_put;
1842
1843	if (ring->frame_size - NL_MMAP_HDRLEN < size)
1844		goto out_put;
1845
1846	skb = alloc_skb_head(gfp_mask);
1847	if (skb == NULL)
1848		goto err1;
1849
1850	spin_lock_bh(&sk->sk_receive_queue.lock);
1851	/* check again under lock */
1852	if (ring->pg_vec == NULL)
1853		goto out_free;
1854
1855	/* check again under lock */
1856	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1857	if (maxlen < size)
1858		goto out_free;
1859
1860	netlink_forward_ring(ring);
1861	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1862	if (hdr == NULL)
1863		goto err2;
1864	netlink_ring_setup_skb(skb, sk, ring, hdr);
1865	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1866	atomic_inc(&ring->pending);
1867	netlink_increment_head(ring);
1868
1869	spin_unlock_bh(&sk->sk_receive_queue.lock);
1870	return skb;
1871
1872err2:
1873	kfree_skb(skb);
1874	spin_unlock_bh(&sk->sk_receive_queue.lock);
1875	netlink_overrun(sk);
1876err1:
1877	sock_put(sk);
1878	return NULL;
1879
1880out_free:
1881	kfree_skb(skb);
1882	spin_unlock_bh(&sk->sk_receive_queue.lock);
1883out_put:
1884	sock_put(sk);
1885out:
1886#endif
1887	return alloc_skb(size, gfp_mask);
1888}
1889EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1890
1891int netlink_has_listeners(struct sock *sk, unsigned int group)
1892{
1893	int res = 0;
1894	struct listeners *listeners;
1895
1896	BUG_ON(!netlink_is_kernel(sk));
1897
1898	rcu_read_lock();
1899	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1900
1901	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1902		res = test_bit(group - 1, listeners->masks);
1903
1904	rcu_read_unlock();
1905
1906	return res;
1907}
1908EXPORT_SYMBOL_GPL(netlink_has_listeners);
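
/*
 * Example (editor's sketch): callers commonly use this to avoid building
 * a notification at all when nobody joined the group (kernel_sk and
 * MY_GRP are placeholders):
 *
 *	if (!netlink_has_listeners(kernel_sk, MY_GRP))
 *		return;		// nobody listening, skip the allocation
 */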
1909
1910static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1911{
1912	struct netlink_sock *nlk = nlk_sk(sk);
1913
1914	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1915	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1916		netlink_skb_set_owner_r(skb, sk);
1917		__netlink_sendskb(sk, skb);
1918		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1919	}
1920	return -1;
1921}
1922
1923struct netlink_broadcast_data {
1924	struct sock *exclude_sk;
1925	struct net *net;
1926	u32 portid;
1927	u32 group;
1928	int failure;
1929	int delivery_failure;
1930	int congested;
1931	int delivered;
1932	gfp_t allocation;
1933	struct sk_buff *skb, *skb2;
1934	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1935	void *tx_data;
1936};
1937
1938static int do_one_broadcast(struct sock *sk,
1939				   struct netlink_broadcast_data *p)
1940{
1941	struct netlink_sock *nlk = nlk_sk(sk);
1942	int val;
1943
1944	if (p->exclude_sk == sk)
1945		goto out;
1946
1947	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1948	    !test_bit(p->group - 1, nlk->groups))
1949		goto out;
1950
1951	if (!net_eq(sock_net(sk), p->net))
1952		goto out;
1953
1954	if (p->failure) {
1955		netlink_overrun(sk);
1956		goto out;
1957	}
1958
1959	sock_hold(sk);
1960	if (p->skb2 == NULL) {
1961		if (skb_shared(p->skb)) {
1962			p->skb2 = skb_clone(p->skb, p->allocation);
1963		} else {
1964			p->skb2 = skb_get(p->skb);
1965			/*
1966			 * skb ownership may have been set when
1967			 * delivered to a previous socket.
1968			 */
1969			skb_orphan(p->skb2);
1970		}
1971	}
1972	if (p->skb2 == NULL) {
1973		netlink_overrun(sk);
1974		/* Clone failed. Notify ALL listeners. */
1975		p->failure = 1;
1976		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1977			p->delivery_failure = 1;
1978	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1979		kfree_skb(p->skb2);
1980		p->skb2 = NULL;
1981	} else if (sk_filter(sk, p->skb2)) {
1982		kfree_skb(p->skb2);
1983		p->skb2 = NULL;
1984	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1985		netlink_overrun(sk);
1986		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1987			p->delivery_failure = 1;
1988	} else {
1989		p->congested |= val;
1990		p->delivered = 1;
1991		p->skb2 = NULL;
1992	}
1993	sock_put(sk);
1994
1995out:
1996	return 0;
1997}
1998
1999int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
2000	u32 group, gfp_t allocation,
2001	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2002	void *filter_data)
2003{
2004	struct net *net = sock_net(ssk);
2005	struct netlink_broadcast_data info;
2006	struct sock *sk;
2007
2008	skb = netlink_trim(skb, allocation);
2009
2010	info.exclude_sk = ssk;
2011	info.net = net;
2012	info.portid = portid;
2013	info.group = group;
2014	info.failure = 0;
2015	info.delivery_failure = 0;
2016	info.congested = 0;
2017	info.delivered = 0;
2018	info.allocation = allocation;
2019	info.skb = skb;
2020	info.skb2 = NULL;
2021	info.tx_filter = filter;
2022	info.tx_data = filter_data;
2023
2024	/* While we sleep in clone, do not allow the socket list to change */
2025
2026	netlink_lock_table();
2027
2028	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2029		do_one_broadcast(sk, &info);
2030
2031	consume_skb(skb);
2032
2033	netlink_unlock_table();
2034
2035	if (info.delivery_failure) {
2036		kfree_skb(info.skb2);
2037		return -ENOBUFS;
2038	}
2039	consume_skb(info.skb2);
2040
2041	if (info.delivered) {
2042		if (info.congested && (allocation & __GFP_WAIT))
2043			yield();
2044		return 0;
2045	}
2046	return -ESRCH;
2047}
2048EXPORT_SYMBOL(netlink_broadcast_filtered);
2049
2050int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
2051		      u32 group, gfp_t allocation)
2052{
2053	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
2054		NULL, NULL);
2055}
2056EXPORT_SYMBOL(netlink_broadcast);
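
/*
 * Example (editor's sketch): sending a multicast notification; kernel_sk
 * and MY_GRP are placeholders.  -ESRCH only means that no socket was
 * subscribed and is usually not treated as an error:
 *
 *	err = netlink_broadcast(kernel_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		pr_warn("notification failed: %d\n", err);
 */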
2057
2058struct netlink_set_err_data {
2059	struct sock *exclude_sk;
2060	u32 portid;
2061	u32 group;
2062	int code;
2063};
2064
2065static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
2066{
2067	struct netlink_sock *nlk = nlk_sk(sk);
2068	int ret = 0;
2069
2070	if (sk == p->exclude_sk)
2071		goto out;
2072
2073	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2074		goto out;
2075
2076	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2077	    !test_bit(p->group - 1, nlk->groups))
2078		goto out;
2079
2080	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
2081		ret = 1;
2082		goto out;
2083	}
2084
2085	sk->sk_err = p->code;
2086	sk->sk_error_report(sk);
2087out:
2088	return ret;
2089}
2090
2091/**
2092 * netlink_set_err - report error to broadcast listeners
2093 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2094 * @portid: the PORTID of a process that we want to skip (if any)
2095 * @group: the broadcast group that will notice the error
2096 * @code: error code, must be negative (as usual in kernelspace)
2097 *
2098 * This function returns the number of broadcast listeners that have set the
2099 * NETLINK_NO_ENOBUFS socket option.
2100 */
2101int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2102{
2103	struct netlink_set_err_data info;
2104	struct sock *sk;
2105	int ret = 0;
2106
2107	info.exclude_sk = ssk;
2108	info.portid = portid;
2109	info.group = group;
2110	/* sk->sk_err wants a positive error value */
2111	info.code = -code;
2112
2113	read_lock(&nl_table_lock);
2114
2115	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2116		ret += do_one_set_err(sk, &info);
2117
2118	read_unlock(&nl_table_lock);
2119	return ret;
2120}
2121EXPORT_SYMBOL(netlink_set_err);
2122
2123/* must be called with netlink table grabbed */
2124static void netlink_update_socket_mc(struct netlink_sock *nlk,
2125				     unsigned int group,
2126				     int is_new)
2127{
2128	int old, new = !!is_new, subscriptions;
2129
2130	old = test_bit(group - 1, nlk->groups);
2131	subscriptions = nlk->subscriptions - old + new;
2132	if (new)
2133		__set_bit(group - 1, nlk->groups);
2134	else
2135		__clear_bit(group - 1, nlk->groups);
2136	netlink_update_subscriptions(&nlk->sk, subscriptions);
2137	netlink_update_listeners(&nlk->sk);
2138}
2139
2140static int netlink_setsockopt(struct socket *sock, int level, int optname,
2141			      char __user *optval, unsigned int optlen)
2142{
2143	struct sock *sk = sock->sk;
2144	struct netlink_sock *nlk = nlk_sk(sk);
2145	unsigned int val = 0;
2146	int err;
2147
2148	if (level != SOL_NETLINK)
2149		return -ENOPROTOOPT;
2150
2151	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2152	    optlen >= sizeof(int) &&
2153	    get_user(val, (unsigned int __user *)optval))
2154		return -EFAULT;
2155
2156	switch (optname) {
2157	case NETLINK_PKTINFO:
2158		if (val)
2159			nlk->flags |= NETLINK_F_RECV_PKTINFO;
2160		else
2161			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
2162		err = 0;
2163		break;
2164	case NETLINK_ADD_MEMBERSHIP:
2165	case NETLINK_DROP_MEMBERSHIP: {
2166		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
2167			return -EPERM;
2168		err = netlink_realloc_groups(sk);
2169		if (err)
2170			return err;
2171		if (!val || val - 1 >= nlk->ngroups)
2172			return -EINVAL;
2173		netlink_table_grab();
2174		netlink_update_socket_mc(nlk, val,
2175					 optname == NETLINK_ADD_MEMBERSHIP);
2176		netlink_table_ungrab();
2177
2178		if (nlk->netlink_bind)
2179			nlk->netlink_bind(val);
2180
2181		err = 0;
2182		break;
2183	}
2184	case NETLINK_BROADCAST_ERROR:
2185		if (val)
2186			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
2187		else
2188			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
2189		err = 0;
2190		break;
2191	case NETLINK_NO_ENOBUFS:
2192		if (val) {
2193			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2194			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
2195			wake_up_interruptible(&nlk->wait);
2196		} else {
2197			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
2198		}
2199		err = 0;
2200		break;
2201#ifdef CONFIG_NETLINK_MMAP
2202	case NETLINK_RX_RING:
2203	case NETLINK_TX_RING: {
2204		struct nl_mmap_req req;
2205
2206		/* Rings might consume more memory than queue limits, require
2207		 * CAP_NET_ADMIN.
2208		 */
2209		if (!capable(CAP_NET_ADMIN))
2210			return -EPERM;
2211		if (optlen < sizeof(req))
2212			return -EINVAL;
2213		if (copy_from_user(&req, optval, sizeof(req)))
2214			return -EFAULT;
2215		err = netlink_set_ring(sk, &req, false,
2216				       optname == NETLINK_TX_RING);
2217		break;
2218	}
2219#endif /* CONFIG_NETLINK_MMAP */
2220	default:
2221		err = -ENOPROTOOPT;
2222	}
2223	return err;
2224}
2225
2226static int netlink_getsockopt(struct socket *sock, int level, int optname,
2227			      char __user *optval, int __user *optlen)
2228{
2229	struct sock *sk = sock->sk;
2230	struct netlink_sock *nlk = nlk_sk(sk);
2231	int len, val, err;
2232
2233	if (level != SOL_NETLINK)
2234		return -ENOPROTOOPT;
2235
2236	if (get_user(len, optlen))
2237		return -EFAULT;
2238	if (len < 0)
2239		return -EINVAL;
2240
2241	switch (optname) {
2242	case NETLINK_PKTINFO:
2243		if (len < sizeof(int))
2244			return -EINVAL;
2245		len = sizeof(int);
2246		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
2247		if (put_user(len, optlen) ||
2248		    put_user(val, optval))
2249			return -EFAULT;
2250		err = 0;
2251		break;
2252	case NETLINK_BROADCAST_ERROR:
2253		if (len < sizeof(int))
2254			return -EINVAL;
2255		len = sizeof(int);
2256		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
2257		if (put_user(len, optlen) ||
2258		    put_user(val, optval))
2259			return -EFAULT;
2260		err = 0;
2261		break;
2262	case NETLINK_NO_ENOBUFS:
2263		if (len < sizeof(int))
2264			return -EINVAL;
2265		len = sizeof(int);
2266		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
2267		if (put_user(len, optlen) ||
2268		    put_user(val, optval))
2269			return -EFAULT;
2270		err = 0;
2271		break;
2272	default:
2273		err = -ENOPROTOOPT;
2274	}
2275	return err;
2276}
2277
2278static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2279{
2280	struct nl_pktinfo info;
2281
2282	info.group = NETLINK_CB(skb).dst_group;
2283	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2284}
2285
2286static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2287			   struct msghdr *msg, size_t len)
2288{
2289	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2290	struct sock *sk = sock->sk;
2291	struct netlink_sock *nlk = nlk_sk(sk);
2292	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2293	u32 dst_portid;
2294	u32 dst_group;
2295	struct sk_buff *skb;
2296	int err;
2297	struct scm_cookie scm;
2298	u32 netlink_skb_flags = 0;
2299
2300	if (msg->msg_flags&MSG_OOB)
2301		return -EOPNOTSUPP;
2302
2303	if (NULL == siocb->scm)
2304		siocb->scm = &scm;
2305
2306	err = scm_send(sock, msg, siocb->scm, true);
2307	if (err < 0)
2308		return err;
2309
2310	if (msg->msg_namelen) {
2311		err = -EINVAL;
2312		if (addr->nl_family != AF_NETLINK)
2313			goto out;
2314		dst_portid = addr->nl_pid;
2315		dst_group = ffs(addr->nl_groups);
2316		err =  -EPERM;
2317		if ((dst_group || dst_portid) &&
2318		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
2319			goto out;
2320		netlink_skb_flags |= NETLINK_SKB_DST;
2321	} else {
2322		dst_portid = nlk->dst_portid;
2323		dst_group = nlk->dst_group;
2324	}
2325
2326	if (!nlk->portid) {
2327		err = netlink_autobind(sock);
2328		if (err)
2329			goto out;
2330	}
2331
2332	if (netlink_tx_is_mmaped(sk) &&
2333	    msg->msg_iov->iov_base == NULL) {
2334		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2335					   siocb);
2336		goto out;
2337	}
2338
2339	err = -EMSGSIZE;
2340	if (len > sk->sk_sndbuf - 32)
2341		goto out;
2342	err = -ENOBUFS;
2343	skb = netlink_alloc_large_skb(len, dst_group);
2344	if (skb == NULL)
2345		goto out;
2346
2347	NETLINK_CB(skb).portid	= nlk->portid;
2348	NETLINK_CB(skb).dst_group = dst_group;
2349	NETLINK_CB(skb).creds	= siocb->scm->creds;
2350	NETLINK_CB(skb).flags	= netlink_skb_flags;
2351
2352	err = -EFAULT;
2353	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2354		kfree_skb(skb);
2355		goto out;
2356	}
2357
2358	err = security_netlink_send(sk, skb);
2359	if (err) {
2360		kfree_skb(skb);
2361		goto out;
2362	}
2363
2364	if (dst_group) {
2365		atomic_inc(&skb->users);
2366		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2367	}
2368	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2369
2370out:
2371	scm_destroy(siocb->scm);
2372	return err;
2373}
2374
2375static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2376			   struct msghdr *msg, size_t len,
2377			   int flags)
2378{
2379	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2380	struct scm_cookie scm;
2381	struct sock *sk = sock->sk;
2382	struct netlink_sock *nlk = nlk_sk(sk);
2383	int noblock = flags&MSG_DONTWAIT;
2384	size_t copied;
2385	struct sk_buff *skb, *data_skb;
2386	int err, ret;
2387
2388	if (flags&MSG_OOB)
2389		return -EOPNOTSUPP;
2390
2391	copied = 0;
2392
2393	skb = skb_recv_datagram(sk, flags, noblock, &err);
2394	if (skb == NULL)
2395		goto out;
2396
2397	data_skb = skb;
2398
2399#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2400	if (unlikely(skb_shinfo(skb)->frag_list)) {
2401		/*
2402		 * If this skb has a frag_list, then here that means that we
2403		 * will have to use the frag_list skb's data for compat tasks
2404		 * and the regular skb's data for normal (non-compat) tasks.
2405		 *
2406		 * If we need to send the compat skb, assign it to the
2407		 * 'data_skb' variable so that it will be used below for data
2408		 * copying. We keep 'skb' for everything else, including
2409		 * freeing both later.
2410		 */
2411		if (flags & MSG_CMSG_COMPAT)
2412			data_skb = skb_shinfo(skb)->frag_list;
2413	}
2414#endif
2415
2416	/* Record the max length of recvmsg() calls for future allocations */
2417	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2418	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2419				     16384);
2420
2421	copied = data_skb->len;
2422	if (len < copied) {
2423		msg->msg_flags |= MSG_TRUNC;
2424		copied = len;
2425	}
2426
2427	skb_reset_transport_header(data_skb);
2428	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
2429
2430	if (msg->msg_name) {
2431		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2432		addr->nl_family = AF_NETLINK;
2433		addr->nl_pad    = 0;
2434		addr->nl_pid	= NETLINK_CB(skb).portid;
2435		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
2436		msg->msg_namelen = sizeof(*addr);
2437	}
2438
2439	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
2440		netlink_cmsg_recv_pktinfo(msg, skb);
2441
2442	if (NULL == siocb->scm) {
2443		memset(&scm, 0, sizeof(scm));
2444		siocb->scm = &scm;
2445	}
2446	siocb->scm->creds = *NETLINK_CREDS(skb);
2447	if (flags & MSG_TRUNC)
2448		copied = data_skb->len;
2449
2450	skb_free_datagram(sk, skb);
2451
2452	if (nlk->cb_running &&
2453	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2454		ret = netlink_dump(sk);
2455		if (ret) {
2456			sk->sk_err = ret;
2457			sk->sk_error_report(sk);
2458		}
2459	}
2460
2461	scm_recv(sock, msg, siocb->scm, flags);
2462out:
2463	netlink_rcv_wake(sk);
2464	return err ? : copied;
2465}
2466
2467static void netlink_data_ready(struct sock *sk)
2468{
2469	BUG();
2470}
2471
2472/*
2473 *	We export these functions to other modules. They provide a
2474 *	complete set of kernel non-blocking support for message
2475 *	queueing.
2476 */
2477
2478struct sock *
2479__netlink_kernel_create(struct net *net, int unit, struct module *module,
2480			struct netlink_kernel_cfg *cfg)
2481{
2482	struct socket *sock;
2483	struct sock *sk;
2484	struct netlink_sock *nlk;
2485	struct listeners *listeners = NULL;
2486	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2487	unsigned int groups;
2488
2489	BUG_ON(!nl_table);
2490
2491	if (unit < 0 || unit >= MAX_LINKS)
2492		return NULL;
2493
2494	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2495		return NULL;
2496
2497	/*
2498	 * We only need a reference on the net from sk, but must not
2499	 * get_net() it. Besides, we cannot get and then put the net here.
2500	 * So we create the socket inside init_net and then move it to net.
2501	 */
2502
2503	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2504		goto out_sock_release_nosk;
2505
2506	sk = sock->sk;
2507	sk_change_net(sk, net);
2508
2509	if (!cfg || cfg->groups < 32)
2510		groups = 32;
2511	else
2512		groups = cfg->groups;
2513
2514	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2515	if (!listeners)
2516		goto out_sock_release;
2517
2518	sk->sk_data_ready = netlink_data_ready;
2519	if (cfg && cfg->input)
2520		nlk_sk(sk)->netlink_rcv = cfg->input;
2521
2522	if (netlink_insert(sk, net, 0))
2523		goto out_sock_release;
2524
2525	nlk = nlk_sk(sk);
2526	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
2527
2528	netlink_table_grab();
2529	if (!nl_table[unit].registered) {
2530		nl_table[unit].groups = groups;
2531		rcu_assign_pointer(nl_table[unit].listeners, listeners);
2532		nl_table[unit].cb_mutex = cb_mutex;
2533		nl_table[unit].module = module;
2534		if (cfg) {
2535			nl_table[unit].bind = cfg->bind;
2536			nl_table[unit].flags = cfg->flags;
2537			if (cfg->compare)
2538				nl_table[unit].compare = cfg->compare;
2539		}
2540		nl_table[unit].registered = 1;
2541	} else {
2542		kfree(listeners);
2543		nl_table[unit].registered++;
2544	}
2545	netlink_table_ungrab();
2546	return sk;
2547
2548out_sock_release:
2549	kfree(listeners);
2550	netlink_kernel_release(sk);
2551	return NULL;
2552
2553out_sock_release_nosk:
2554	sock_release(sock);
2555	return NULL;
2556}
2557EXPORT_SYMBOL(__netlink_kernel_create);
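
/*
 * Example (editor's sketch): users normally go through the
 * netlink_kernel_create() wrapper from <linux/netlink.h>, which passes
 * THIS_MODULE for them (my_rcv_skb is a hypothetical input callback):
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= my_rcv_skb,
 *	};
 *	struct sock *sk;
 *
 *	sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
 *	if (!sk)
 *		return -ENOMEM;
 */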
2558
2559void
2560netlink_kernel_release(struct sock *sk)
2561{
2562	sk_release_kernel(sk);
2563}
2564EXPORT_SYMBOL(netlink_kernel_release);
2565
2566int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2567{
2568	struct listeners *new, *old;
2569	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2570
2571	if (groups < 32)
2572		groups = 32;
2573
2574	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2575		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2576		if (!new)
2577			return -ENOMEM;
2578		old = nl_deref_protected(tbl->listeners);
2579		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2580		rcu_assign_pointer(tbl->listeners, new);
2581
2582		kfree_rcu(old, rcu);
2583	}
2584	tbl->groups = groups;
2585
2586	return 0;
2587}
2588
2589/**
2590 * netlink_change_ngroups - change number of multicast groups
2591 *
2592 * This changes the number of multicast groups that are available
2593 * on a certain netlink family. Note that it is not possible to
2594 * change the number of groups to below 32. Also note that it does
2595 * not implicitly call netlink_clear_multicast_users() when the
2596 * number of groups is reduced.
2597 *
2598 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2599 * @groups: The new number of groups.
2600 */
2601int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2602{
2603	int err;
2604
2605	netlink_table_grab();
2606	err = __netlink_change_ngroups(sk, groups);
2607	netlink_table_ungrab();
2608
2609	return err;
2610}
2611
2612void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2613{
2614	struct sock *sk;
2615	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2616
2617	sk_for_each_bound(sk, &tbl->mc_list)
2618		netlink_update_socket_mc(nlk_sk(sk), group, 0);
2619}
2620
2621struct nlmsghdr *
2622__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2623{
2624	struct nlmsghdr *nlh;
2625	int size = nlmsg_msg_size(len);
2626
2627	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2628	nlh->nlmsg_type = type;
2629	nlh->nlmsg_len = size;
2630	nlh->nlmsg_flags = flags;
2631	nlh->nlmsg_pid = portid;
2632	nlh->nlmsg_seq = seq;
2633	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2634		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2635	return nlh;
2636}
2637EXPORT_SYMBOL(__nlmsg_put);
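
/*
 * Example (editor's sketch): most code builds messages through the
 * nlmsg_put() wrapper and the nla_put_*() helpers instead of calling
 * __nlmsg_put() directly (MY_CMD and MY_ATTR are placeholders):
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_CMD, 0, NLM_F_MULTI);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	if (nla_put_u32(skb, MY_ATTR, value))
 *		return -EMSGSIZE;
 *	nlmsg_end(skb, nlh);
 */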
2638
2639/*
2640 * It looks a bit ugly.
2641 * It would be better to create a kernel thread.
2642 */
2643
2644static int netlink_dump(struct sock *sk)
2645{
2646	struct netlink_sock *nlk = nlk_sk(sk);
2647	struct netlink_callback *cb;
2648	struct sk_buff *skb = NULL;
2649	struct nlmsghdr *nlh;
2650	int len, err = -ENOBUFS;
2651	int alloc_size;
2652
2653	mutex_lock(nlk->cb_mutex);
2654	if (!nlk->cb_running) {
2655		err = -EINVAL;
2656		goto errout_skb;
2657	}
2658
2659	cb = &nlk->cb;
2660	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2661
2662	if (!netlink_rx_is_mmaped(sk) &&
2663	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2664		goto errout_skb;
2665
2666	/* NLMSG_GOODSIZE is small to avoid high-order allocations being
2667	 * required, but it makes sense to _attempt_ a 16KB allocation
2668	 * to reduce the number of system calls on dump operations, if the
2669	 * user ever provided a big enough buffer.
2670	 */
2671	if (alloc_size < nlk->max_recvmsg_len) {
2672		skb = netlink_alloc_skb(sk,
2673					nlk->max_recvmsg_len,
2674					nlk->portid,
2675					GFP_KERNEL |
2676					__GFP_NOWARN |
2677					__GFP_NORETRY);
2678		/* available room should be exact amount to avoid MSG_TRUNC */
2679		if (skb)
2680			skb_reserve(skb, skb_tailroom(skb) -
2681					 nlk->max_recvmsg_len);
2682	}
2683	if (!skb)
2684		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2685					GFP_KERNEL);
2686	if (!skb)
2687		goto errout_skb;
2688	netlink_skb_set_owner_r(skb, sk);
2689
2690	len = cb->dump(skb, cb);
2691
2692	if (len > 0) {
2693		mutex_unlock(nlk->cb_mutex);
2694
2695		if (sk_filter(sk, skb))
2696			kfree_skb(skb);
2697		else
2698			__netlink_sendskb(sk, skb);
2699		return 0;
2700	}
2701
2702	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2703	if (!nlh)
2704		goto errout_skb;
2705
2706	nl_dump_check_consistent(cb, nlh);
2707
2708	memcpy(nlmsg_data(nlh), &len, sizeof(len));
2709
2710	if (sk_filter(sk, skb))
2711		kfree_skb(skb);
2712	else
2713		__netlink_sendskb(sk, skb);
2714
2715	if (cb->done)
2716		cb->done(cb);
2717
2718	nlk->cb_running = false;
2719	mutex_unlock(nlk->cb_mutex);
2720	module_put(cb->module);
2721	consume_skb(cb->skb);
2722	return 0;
2723
2724errout_skb:
2725	mutex_unlock(nlk->cb_mutex);
2726	kfree_skb(skb);
2727	return err;
2728}
2729
2730int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2731			 const struct nlmsghdr *nlh,
2732			 struct netlink_dump_control *control)
2733{
2734	struct netlink_callback *cb;
2735	struct sock *sk;
2736	struct netlink_sock *nlk;
2737	int ret;
2738
2739	/* Memory mapped dump requests need to be copied to avoid looping
2740	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2741	 * a reference to the skb.
2742	 */
2743	if (netlink_skb_is_mmaped(skb)) {
2744		skb = skb_copy(skb, GFP_KERNEL);
2745		if (skb == NULL)
2746			return -ENOBUFS;
2747	} else
2748		atomic_inc(&skb->users);
2749
2750	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2751	if (sk == NULL) {
2752		ret = -ECONNREFUSED;
2753		goto error_free;
2754	}
2755
2756	nlk = nlk_sk(sk);
2757	mutex_lock(nlk->cb_mutex);
2758	/* A dump is in progress... */
2759	if (nlk->cb_running) {
2760		ret = -EBUSY;
2761		goto error_unlock;
2762	}
2763	/* add reference of module which cb->dump belongs to */
2764	if (!try_module_get(control->module)) {
2765		ret = -EPROTONOSUPPORT;
2766		goto error_unlock;
2767	}
2768
2769	cb = &nlk->cb;
2770	memset(cb, 0, sizeof(*cb));
2771	cb->dump = control->dump;
2772	cb->done = control->done;
2773	cb->nlh = nlh;
2774	cb->data = control->data;
2775	cb->module = control->module;
2776	cb->min_dump_alloc = control->min_dump_alloc;
2777	cb->skb = skb;
2778
2779	nlk->cb_running = true;
2780
2781	mutex_unlock(nlk->cb_mutex);
2782
2783	ret = netlink_dump(sk);
2784	sock_put(sk);
2785
2786	if (ret)
2787		return ret;
2788
2789	/* We successfully started a dump; by returning -EINTR we
2790	 * signal that no ACK should be sent even if it was requested.
2791	 */
2792	return -EINTR;
2793
2794error_unlock:
2795	sock_put(sk);
2796	mutex_unlock(nlk->cb_mutex);
2797error_free:
2798	kfree_skb(skb);
2799	return ret;
2800}
2801EXPORT_SYMBOL(__netlink_dump_start);
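
/*
 * Example (editor's sketch): request handlers start dumps through the
 * netlink_dump_start() wrapper, which fills in THIS_MODULE; returning
 * its result propagates the -EINTR "no ACK" convention from above
 * (my_dump is a hypothetical dump callback, sk the kernel socket):
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *		};
 *		return netlink_dump_start(sk, skb, nlh, &c);
 *	}
 */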
2802
2803void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2804{
2805	struct sk_buff *skb;
2806	struct nlmsghdr *rep;
2807	struct nlmsgerr *errmsg;
2808	size_t payload = sizeof(*errmsg);
2809
2810	/* error messages get the original request appended */
2811	if (err)
2812		payload += nlmsg_len(nlh);
2813
2814	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2815				NETLINK_CB(in_skb).portid, GFP_KERNEL);
2816	if (!skb) {
2817		struct sock *sk;
2818
2819		sk = netlink_lookup(sock_net(in_skb->sk),
2820				    in_skb->sk->sk_protocol,
2821				    NETLINK_CB(in_skb).portid);
2822		if (sk) {
2823			sk->sk_err = ENOBUFS;
2824			sk->sk_error_report(sk);
2825			sock_put(sk);
2826		}
2827		return;
2828	}
2829
2830	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2831			  NLMSG_ERROR, payload, 0);
2832	errmsg = nlmsg_data(rep);
2833	errmsg->error = err;
2834	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
2835	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2836}
2837EXPORT_SYMBOL(netlink_ack);
2838
2839int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2840						     struct nlmsghdr *))
2841{
2842	struct nlmsghdr *nlh;
2843	int err;
2844
2845	while (skb->len >= nlmsg_total_size(0)) {
2846		int msglen;
2847
2848		nlh = nlmsg_hdr(skb);
2849		err = 0;
2850
2851		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2852			return 0;
2853
2854		/* Only requests are handled by the kernel */
2855		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2856			goto ack;
2857
2858		/* Skip control messages */
2859		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2860			goto ack;
2861
2862		err = cb(skb, nlh);
2863		if (err == -EINTR)
2864			goto skip;
2865
2866ack:
2867		if (nlh->nlmsg_flags & NLM_F_ACK || err)
2868			netlink_ack(skb, nlh, err);
2869
2870skip:
2871		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2872		if (msglen > skb->len)
2873			msglen = skb->len;
2874		skb_pull(skb, msglen);
2875	}
2876
2877	return 0;
2878}
2879EXPORT_SYMBOL(netlink_rcv_skb);
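
/*
 * Example (editor's sketch): a kernel socket's input callback usually
 * just feeds every queued skb through netlink_rcv_skb() with a
 * per-message handler, serialized by a subsystem lock (all names are
 * placeholders):
 *
 *	static void my_rcv_skb(struct sk_buff *skb)
 *	{
 *		mutex_lock(&my_mutex);
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *		mutex_unlock(&my_mutex);
 *	}
 */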
2880
2881/**
2882 * nlmsg_notify - send a notification netlink message
2883 * @sk: netlink socket to use
2884 * @skb: notification message
2885 * @portid: destination netlink portid for reports or 0
2886 * @group: destination multicast group or 0
2887 * @report: 1 to report back, 0 to disable
2888 * @flags: allocation flags
2889 */
2890int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2891		 unsigned int group, int report, gfp_t flags)
2892{
2893	int err = 0;
2894
2895	if (group) {
2896		int exclude_portid = 0;
2897
2898		if (report) {
2899			atomic_inc(&skb->users);
2900			exclude_portid = portid;
2901		}
2902
2903		/* errors reported via destination sk->sk_err, but propagate
2904		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
2905		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2906	}
2907
2908	if (report) {
2909		int err2;
2910
2911		err2 = nlmsg_unicast(sk, skb, portid);
2912		if (!err || err == -ESRCH)
2913			err = err2;
2914	}
2915
2916	return err;
2917}
2918EXPORT_SYMBOL(nlmsg_notify);
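
/*
 * Example (editor's sketch): rtnetlink-style notifiers pass the portid
 * of the requester and the event group; nlmsg_report() extracts the
 * NLM_F_ECHO-based report flag from the request (kernel_sk, req_skb and
 * MY_GRP are placeholders):
 *
 *	err = nlmsg_notify(kernel_sk, skb, NETLINK_CB(req_skb).portid,
 *			   MY_GRP, nlmsg_report(nlh), GFP_KERNEL);
 */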
2919
2920#ifdef CONFIG_PROC_FS
2921struct nl_seq_iter {
2922	struct seq_net_private p;
2923	int link;
2924	int hash_idx;
2925};
2926
2927static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2928{
2929	struct nl_seq_iter *iter = seq->private;
2930	int i, j;
2931	struct sock *s;
2932	loff_t off = 0;
2933
2934	for (i = 0; i < MAX_LINKS; i++) {
2935		struct nl_portid_hash *hash = &nl_table[i].hash;
2936
2937		for (j = 0; j <= hash->mask; j++) {
2938			sk_for_each(s, &hash->table[j]) {
2939				if (sock_net(s) != seq_file_net(seq))
2940					continue;
2941				if (off == pos) {
2942					iter->link = i;
2943					iter->hash_idx = j;
2944					return s;
2945				}
2946				++off;
2947			}
2948		}
2949	}
2950	return NULL;
2951}
2952
2953static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2954	__acquires(nl_table_lock)
2955{
2956	read_lock(&nl_table_lock);
2957	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2958}
2959
2960static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2961{
2962	struct sock *s;
2963	struct nl_seq_iter *iter;
2964	struct net *net;
2965	int i, j;
2966
2967	++*pos;
2968
2969	if (v == SEQ_START_TOKEN)
2970		return netlink_seq_socket_idx(seq, 0);
2971
2972	net = seq_file_net(seq);
2973	iter = seq->private;
2974	s = v;
2975	do {
2976		s = sk_next(s);
2977	} while (s && !nl_table[s->sk_protocol].compare(net, s));
2978	if (s)
2979		return s;
2980
2981	i = iter->link;
2982	j = iter->hash_idx + 1;
2983
2984	do {
2985		struct nl_portid_hash *hash = &nl_table[i].hash;
2986
2987		for (; j <= hash->mask; j++) {
2988			s = sk_head(&hash->table[j]);
2989
2990			while (s && !nl_table[s->sk_protocol].compare(net, s))
2991				s = sk_next(s);
2992			if (s) {
2993				iter->link = i;
2994				iter->hash_idx = j;
2995				return s;
2996			}
2997		}
2998
2999		j = 0;
3000	} while (++i < MAX_LINKS);
3001
3002	return NULL;
3003}
3004
3005static void netlink_seq_stop(struct seq_file *seq, void *v)
3006	__releases(nl_table_lock)
3007{
3008	read_unlock(&nl_table_lock);
3009}
3010
3011
3012static int netlink_seq_show(struct seq_file *seq, void *v)
3013{
3014	if (v == SEQ_START_TOKEN) {
3015		seq_puts(seq,
3016			 "sk       Eth Pid    Groups   "
3017			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
3018	} else {
3019		struct sock *s = v;
3020		struct netlink_sock *nlk = nlk_sk(s);
3021
3022		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
3023			   s,
3024			   s->sk_protocol,
3025			   nlk->portid,
3026			   nlk->groups ? (u32)nlk->groups[0] : 0,
3027			   sk_rmem_alloc_get(s),
3028			   sk_wmem_alloc_get(s),
3029			   nlk->cb_running,
3030			   atomic_read(&s->sk_refcnt),
3031			   atomic_read(&s->sk_drops),
3032			   sock_i_ino(s)
3033			);
3034
3035	}
3036	return 0;
3037}
3038
3039static const struct seq_operations netlink_seq_ops = {
3040	.start  = netlink_seq_start,
3041	.next   = netlink_seq_next,
3042	.stop   = netlink_seq_stop,
3043	.show   = netlink_seq_show,
3044};
3045
3046
3047static int netlink_seq_open(struct inode *inode, struct file *file)
3048{
3049	return seq_open_net(inode, file, &netlink_seq_ops,
3050				sizeof(struct nl_seq_iter));
3051}
3052
3053static const struct file_operations netlink_seq_fops = {
3054	.owner		= THIS_MODULE,
3055	.open		= netlink_seq_open,
3056	.read		= seq_read,
3057	.llseek		= seq_lseek,
3058	.release	= seq_release_net,
3059};
3060
3061#endif
3062
3063int netlink_register_notifier(struct notifier_block *nb)
3064{
3065	return atomic_notifier_chain_register(&netlink_chain, nb);
3066}
3067EXPORT_SYMBOL(netlink_register_notifier);
3068
3069int netlink_unregister_notifier(struct notifier_block *nb)
3070{
3071	return atomic_notifier_chain_unregister(&netlink_chain, nb);
3072}
3073EXPORT_SYMBOL(netlink_unregister_notifier);
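
/*
 * Example (editor's sketch): a NETLINK_URELEASE notifier lets a
 * subsystem drop per-portid state when a userspace socket disappears
 * (my_cleanup and MY_PROTO are hypothetical; see the netlink_notify
 * struct filled in by netlink_release() above):
 *
 *	static int my_nl_notify(struct notifier_block *nb,
 *				unsigned long state, void *_notify)
 *	{
 *		struct netlink_notify *n = _notify;
 *
 *		if (state == NETLINK_URELEASE && n->protocol == MY_PROTO)
 *			my_cleanup(n->net, n->portid);
 *		return NOTIFY_DONE;
 *	}
 */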
3074
3075static const struct proto_ops netlink_ops = {
3076	.family =	PF_NETLINK,
3077	.owner =	THIS_MODULE,
3078	.release =	netlink_release,
3079	.bind =		netlink_bind,
3080	.connect =	netlink_connect,
3081	.socketpair =	sock_no_socketpair,
3082	.accept =	sock_no_accept,
3083	.getname =	netlink_getname,
3084	.poll =		netlink_poll,
3085	.ioctl =	sock_no_ioctl,
3086	.listen =	sock_no_listen,
3087	.shutdown =	sock_no_shutdown,
3088	.setsockopt =	netlink_setsockopt,
3089	.getsockopt =	netlink_getsockopt,
3090	.sendmsg =	netlink_sendmsg,
3091	.recvmsg =	netlink_recvmsg,
3092	.mmap =		netlink_mmap,
3093	.sendpage =	sock_no_sendpage,
3094};
3095
3096static const struct net_proto_family netlink_family_ops = {
3097	.family = PF_NETLINK,
3098	.create = netlink_create,
3099	.owner	= THIS_MODULE,	/* for consistency 8) */
3100};
3101
3102static int __net_init netlink_net_init(struct net *net)
3103{
3104#ifdef CONFIG_PROC_FS
3105	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3106		return -ENOMEM;
3107#endif
3108	return 0;
3109}
3110
3111static void __net_exit netlink_net_exit(struct net *net)
3112{
3113#ifdef CONFIG_PROC_FS
3114	remove_proc_entry("netlink", net->proc_net);
3115#endif
3116}
3117
3118static void __init netlink_add_usersock_entry(void)
3119{
3120	struct listeners *listeners;
3121	int groups = 32;
3122
3123	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3124	if (!listeners)
3125		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3126
3127	netlink_table_grab();
3128
3129	nl_table[NETLINK_USERSOCK].groups = groups;
3130	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3131	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3132	nl_table[NETLINK_USERSOCK].registered = 1;
3133	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3134
3135	netlink_table_ungrab();
3136}
3137
3138static struct pernet_operations __net_initdata netlink_net_ops = {
3139	.init = netlink_net_init,
3140	.exit = netlink_net_exit,
3141};
3142
3142
3143static int __init netlink_proto_init(void)
3144{
3145	int i;
3146	unsigned long limit;
3147	unsigned int order;
3148	int err = proto_register(&netlink_proto, 0);
3149
3150	if (err != 0)
3151		goto out;
3152
3153	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3154
3155	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3156	if (!nl_table)
3157		goto panic;
3158
3159	if (totalram_pages >= (128 * 1024))
3160		limit = totalram_pages >> (21 - PAGE_SHIFT);
3161	else
3162		limit = totalram_pages >> (23 - PAGE_SHIFT);
3163
3164	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
3165	limit = (1UL << order) / sizeof(struct hlist_head);
3166	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
3167
3168	for (i = 0; i < MAX_LINKS; i++) {
3169		struct nl_portid_hash *hash = &nl_table[i].hash;
3170
3171		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
3172		if (!hash->table) {
3173			while (i-- > 0)
3174				nl_portid_hash_free(nl_table[i].hash.table,
3175						 1 * sizeof(*hash->table));
3176			kfree(nl_table);
3177			goto panic;
3178		}
3179		hash->max_shift = order;
3180		hash->shift = 0;
3181		hash->mask = 0;
3182		hash->rehash_time = jiffies;
3183
3184		nl_table[i].compare = netlink_compare;
3185	}
3186
3187	INIT_LIST_HEAD(&netlink_tap_all);
3188
3189	netlink_add_usersock_entry();
3190
3191	sock_register(&netlink_family_ops);
3192	register_pernet_subsys(&netlink_net_ops);
3193	/* The netlink device handler may be needed early. */
3194	rtnetlink_init();
3195out:
3196	return err;
3197panic:
3198	panic("netlink_init: Cannot allocate nl_table\n");
3199}
3200
3201core_initcall(netlink_proto_init);