v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * NETLINK      Generic Netlink Family
   4 *
   5 * 		Authors:	Jamal Hadi Salim
   6 * 				Thomas Graf <tgraf@suug.ch>
   7 *				Johannes Berg <johannes@sipsolutions.net>
   8 */
   9
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/slab.h>
  13#include <linux/errno.h>
  14#include <linux/types.h>
  15#include <linux/socket.h>
  16#include <linux/string_helpers.h>
  17#include <linux/skbuff.h>
  18#include <linux/mutex.h>
  19#include <linux/bitmap.h>
  20#include <linux/rwsem.h>
  21#include <linux/idr.h>
  22#include <net/sock.h>
  23#include <net/genetlink.h>
  24
  25#include "genetlink.h"
  26
  27static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
  28static DECLARE_RWSEM(cb_lock);
  29
  30atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
  31DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
  32
  33void genl_lock(void)
  34{
  35	mutex_lock(&genl_mutex);
  36}
  37EXPORT_SYMBOL(genl_lock);
  38
  39void genl_unlock(void)
  40{
  41	mutex_unlock(&genl_mutex);
  42}
  43EXPORT_SYMBOL(genl_unlock);
  44
  45static void genl_lock_all(void)
  46{
  47	down_write(&cb_lock);
  48	genl_lock();
  49}
  50
  51static void genl_unlock_all(void)
  52{
  53	genl_unlock();
  54	up_write(&cb_lock);
  55}
  56
  57static void genl_op_lock(const struct genl_family *family)
  58{
  59	if (!family->parallel_ops)
  60		genl_lock();
  61}
  62
  63static void genl_op_unlock(const struct genl_family *family)
  64{
  65	if (!family->parallel_ops)
  66		genl_unlock();
  67}
  68
  69static DEFINE_IDR(genl_fam_idr);
  70
  71/*
  72 * Bitmap of multicast groups that are currently in use.
  73 *
  74 * To avoid an allocation at boot of just one unsigned long,
  75 * declare it global instead.
  76 * Bit 0 is marked as already used since group 0 is invalid.
  77 * Bit 1 is marked as already used since the drop-monitor code
  78 * abuses the API and thinks it can statically use group 1.
  79 * That group will typically conflict with other groups that
  80 * any proper users use.
  81 * Bit 16 is marked as used since it's used for generic netlink
  82 * and the code no longer marks pre-reserved IDs as used.
  83 * Bit 17 is marked as already used since the VFS quota code
  84 * also abused this API and relied on family == group ID, we
  85 * cater to that by giving it a static family and group ID.
  86 * Bit 18 is marked as already used since the PMCRAID driver
  87 * did the same thing as the VFS quota code (maybe copied?)
  88 */
  89static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
  90				      BIT(GENL_ID_VFS_DQUOT) |
  91				      BIT(GENL_ID_PMCRAID);
  92static unsigned long *mc_groups = &mc_group_start;
  93static unsigned long mc_groups_longs = 1;
  94
  95/* We need the last attribute with non-zero ID therefore a 2-entry array */
  96static struct nla_policy genl_policy_reject_all[] = {
  97	{ .type = NLA_REJECT },
  98	{ .type = NLA_REJECT },
  99};
 100
 101static int genl_ctrl_event(int event, const struct genl_family *family,
 102			   const struct genl_multicast_group *grp,
 103			   int grp_id);
 104
 105static void
 106genl_op_fill_in_reject_policy(const struct genl_family *family,
 107			      struct genl_ops *op)
 108{
 109	BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
 110
 111	if (op->policy || op->cmd < family->resv_start_op)
 112		return;
 113
 114	op->policy = genl_policy_reject_all;
 115	op->maxattr = 1;
 116}
 117
 118static void
 119genl_op_fill_in_reject_policy_split(const struct genl_family *family,
 120				    struct genl_split_ops *op)
 121{
 122	if (op->policy)
 123		return;
 124
 125	op->policy = genl_policy_reject_all;
 126	op->maxattr = 1;
 127}
 128
 129static const struct genl_family *genl_family_find_byid(unsigned int id)
 130{
 131	return idr_find(&genl_fam_idr, id);
 132}
 133
 134static const struct genl_family *genl_family_find_byname(char *name)
 135{
 136	const struct genl_family *family;
 137	unsigned int id;
 138
 139	idr_for_each_entry(&genl_fam_idr, family, id)
 140		if (strcmp(family->name, name) == 0)
 141			return family;
 142
 143	return NULL;
 144}
 145
 146struct genl_op_iter {
 147	const struct genl_family *family;
 148	struct genl_split_ops doit;
 149	struct genl_split_ops dumpit;
 150	int cmd_idx;
 151	int entry_idx;
 152	u32 cmd;
 153	u8 flags;
 154};
 155
 156static void genl_op_from_full(const struct genl_family *family,
 157			      unsigned int i, struct genl_ops *op)
 158{
 159	*op = family->ops[i];
 160
 161	if (!op->maxattr)
 162		op->maxattr = family->maxattr;
 163	if (!op->policy)
 164		op->policy = family->policy;
 165
 166	genl_op_fill_in_reject_policy(family, op);
 167}
 168
 169static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
 170			     struct genl_ops *op)
 171{
 172	int i;
 173
 174	for (i = 0; i < family->n_ops; i++)
 175		if (family->ops[i].cmd == cmd) {
 176			genl_op_from_full(family, i, op);
 177			return 0;
 178		}
 179
 180	return -ENOENT;
 181}
 182
 183static void genl_op_from_small(const struct genl_family *family,
 184			       unsigned int i, struct genl_ops *op)
 185{
 186	memset(op, 0, sizeof(*op));
 187	op->doit	= family->small_ops[i].doit;
 188	op->dumpit	= family->small_ops[i].dumpit;
 189	op->cmd		= family->small_ops[i].cmd;
 190	op->internal_flags = family->small_ops[i].internal_flags;
 191	op->flags	= family->small_ops[i].flags;
 192	op->validate	= family->small_ops[i].validate;
 193
 194	op->maxattr = family->maxattr;
 195	op->policy = family->policy;
 196
 197	genl_op_fill_in_reject_policy(family, op);
 198}
 199
 200static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
 201			      struct genl_ops *op)
 202{
 203	int i;
 204
 205	for (i = 0; i < family->n_small_ops; i++)
 206		if (family->small_ops[i].cmd == cmd) {
 207			genl_op_from_small(family, i, op);
 208			return 0;
 209		}
 210
 211	return -ENOENT;
 212}
 213
 214static void genl_op_from_split(struct genl_op_iter *iter)
 215{
 216	const struct genl_family *family = iter->family;
 217	int i, cnt = 0;
 218
 219	i = iter->entry_idx - family->n_ops - family->n_small_ops;
 220
 221	if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
 222		iter->doit = family->split_ops[i + cnt];
 223		genl_op_fill_in_reject_policy_split(family, &iter->doit);
 224		cnt++;
 225	} else {
 226		memset(&iter->doit, 0, sizeof(iter->doit));
 227	}
 228
 229	if (i + cnt < family->n_split_ops &&
 230	    family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
 231	    (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
 232		iter->dumpit = family->split_ops[i + cnt];
 233		genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
 234		cnt++;
 235	} else {
 236		memset(&iter->dumpit, 0, sizeof(iter->dumpit));
 237	}
 238
 239	WARN_ON(!cnt);
 240	iter->entry_idx += cnt;
 241}
 242
 243static int
 244genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
 245		   struct genl_split_ops *op)
 246{
 247	int i;
 248
 249	for (i = 0; i < family->n_split_ops; i++)
 250		if (family->split_ops[i].cmd == cmd &&
 251		    family->split_ops[i].flags & flag) {
 252			*op = family->split_ops[i];
 253			return 0;
 254		}
 255
 256	return -ENOENT;
 257}
 258
 259static int
 260genl_cmd_full_to_split(struct genl_split_ops *op,
 261		       const struct genl_family *family,
 262		       const struct genl_ops *full, u8 flags)
 263{
 264	if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
 265	    (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
 266		memset(op, 0, sizeof(*op));
 267		return -ENOENT;
 268	}
 269
 270	if (flags & GENL_CMD_CAP_DUMP) {
 271		op->start	= full->start;
 272		op->dumpit	= full->dumpit;
 273		op->done	= full->done;
 274	} else {
 275		op->pre_doit	= family->pre_doit;
 276		op->doit	= full->doit;
 277		op->post_doit	= family->post_doit;
 278	}
 279
 280	if (flags & GENL_CMD_CAP_DUMP &&
 281	    full->validate & GENL_DONT_VALIDATE_DUMP) {
 282		op->policy	= NULL;
 283		op->maxattr	= 0;
 284	} else {
 285		op->policy	= full->policy;
 286		op->maxattr	= full->maxattr;
 287	}
 288
 289	op->cmd			= full->cmd;
 290	op->internal_flags	= full->internal_flags;
 291	op->flags		= full->flags;
 292	op->validate		= full->validate;
 293
 294	/* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
 295	op->flags		|= flags;
 296
 297	return 0;
 298}
 299
 300/* Must make sure that op is initialized to 0 on failure */
 301static int
 302genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
 303	     struct genl_split_ops *op)
 304{
 305	struct genl_ops full;
 306	int err;
 307
 308	err = genl_get_cmd_full(cmd, family, &full);
 309	if (err == -ENOENT)
 310		err = genl_get_cmd_small(cmd, family, &full);
 311	/* Found one of legacy forms */
 312	if (err == 0)
 313		return genl_cmd_full_to_split(op, family, &full, flags);
 314
 315	err = genl_get_cmd_split(cmd, flags, family, op);
 316	if (err)
 317		memset(op, 0, sizeof(*op));
 318	return err;
 319}
 320
 321/* For policy dumping only, get ops of both do and dump.
 322 * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
 323 */
 324static int
 325genl_get_cmd_both(u32 cmd, const struct genl_family *family,
 326		  struct genl_split_ops *doit, struct genl_split_ops *dumpit)
 327{
 328	int err1, err2;
 329
 330	err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
 331	err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
 332
 333	return err1 && err2 ? -ENOENT : 0;
 334}
 335
 336static bool
 337genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
 338{
 339	iter->family = family;
 340	iter->cmd_idx = 0;
 341	iter->entry_idx = 0;
 342
 343	iter->flags = 0;
 344
 345	return iter->family->n_ops +
 346		iter->family->n_small_ops +
 347		iter->family->n_split_ops;
 348}
 349
 350static bool genl_op_iter_next(struct genl_op_iter *iter)
 351{
 352	const struct genl_family *family = iter->family;
 353	bool legacy_op = true;
 354	struct genl_ops op;
 355
 356	if (iter->entry_idx < family->n_ops) {
 357		genl_op_from_full(family, iter->entry_idx, &op);
 358	} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
 359		genl_op_from_small(family, iter->entry_idx - family->n_ops,
 360				   &op);
 361	} else if (iter->entry_idx <
 362		   family->n_ops + family->n_small_ops + family->n_split_ops) {
 363		legacy_op = false;
 364		/* updates entry_idx */
 365		genl_op_from_split(iter);
 366	} else {
 367		return false;
 368	}
 369
 370	iter->cmd_idx++;
 371
 372	if (legacy_op) {
 373		iter->entry_idx++;
 374
 375		genl_cmd_full_to_split(&iter->doit, family,
 376				       &op, GENL_CMD_CAP_DO);
 377		genl_cmd_full_to_split(&iter->dumpit, family,
 378				       &op, GENL_CMD_CAP_DUMP);
 379	}
 380
 381	iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
 382	iter->flags = iter->doit.flags | iter->dumpit.flags;
 383
 384	return true;
 385}
 386
 387static void
 388genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
 389{
 390	*dst = *src;
 391}
 392
 393static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
 394{
 395	return iter->cmd_idx;
 396}
 397
 398static int genl_allocate_reserve_groups(int n_groups, int *first_id)
 399{
 400	unsigned long *new_groups;
 401	int start = 0;
 402	int i;
 403	int id;
 404	bool fits;
 405
 406	do {
 407		if (start == 0)
 408			id = find_first_zero_bit(mc_groups,
 409						 mc_groups_longs *
 410						 BITS_PER_LONG);
 411		else
 412			id = find_next_zero_bit(mc_groups,
 413						mc_groups_longs * BITS_PER_LONG,
 414						start);
 415
 416		fits = true;
 417		for (i = id;
 418		     i < min_t(int, id + n_groups,
 419			       mc_groups_longs * BITS_PER_LONG);
 420		     i++) {
 421			if (test_bit(i, mc_groups)) {
 422				start = i;
 423				fits = false;
 424				break;
 425			}
 426		}
 427
 428		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
 429			unsigned long new_longs = mc_groups_longs +
 430						  BITS_TO_LONGS(n_groups);
 431			size_t nlen = new_longs * sizeof(unsigned long);
 432
 433			if (mc_groups == &mc_group_start) {
 434				new_groups = kzalloc(nlen, GFP_KERNEL);
 435				if (!new_groups)
 436					return -ENOMEM;
 437				mc_groups = new_groups;
 438				*mc_groups = mc_group_start;
 439			} else {
 440				new_groups = krealloc(mc_groups, nlen,
 441						      GFP_KERNEL);
 442				if (!new_groups)
 443					return -ENOMEM;
 444				mc_groups = new_groups;
 445				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
 446					mc_groups[mc_groups_longs + i] = 0;
 447			}
 448			mc_groups_longs = new_longs;
 449		}
 450	} while (!fits);
 451
 452	for (i = id; i < id + n_groups; i++)
 453		set_bit(i, mc_groups);
 454	*first_id = id;
 455	return 0;
 456}
 457
 458static struct genl_family genl_ctrl;
 459
 460static int genl_validate_assign_mc_groups(struct genl_family *family)
 461{
 462	int first_id;
 463	int n_groups = family->n_mcgrps;
 464	int err = 0, i;
 465	bool groups_allocated = false;
 466
 467	if (!n_groups)
 468		return 0;
 469
 470	for (i = 0; i < n_groups; i++) {
 471		const struct genl_multicast_group *grp = &family->mcgrps[i];
 472
 473		if (WARN_ON(grp->name[0] == '\0'))
 474			return -EINVAL;
 475		if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
 476			return -EINVAL;
 477	}
 478
 479	/* special-case our own group and hacks */
 480	if (family == &genl_ctrl) {
 481		first_id = GENL_ID_CTRL;
 482		BUG_ON(n_groups != 1);
 483	} else if (strcmp(family->name, "NET_DM") == 0) {
 484		first_id = 1;
 485		BUG_ON(n_groups != 1);
 486	} else if (family->id == GENL_ID_VFS_DQUOT) {
 487		first_id = GENL_ID_VFS_DQUOT;
 488		BUG_ON(n_groups != 1);
 489	} else if (family->id == GENL_ID_PMCRAID) {
 490		first_id = GENL_ID_PMCRAID;
 491		BUG_ON(n_groups != 1);
 492	} else {
 493		groups_allocated = true;
 494		err = genl_allocate_reserve_groups(n_groups, &first_id);
 495		if (err)
 496			return err;
 497	}
 498
 499	family->mcgrp_offset = first_id;
 500
 501	/* if still initializing, can't and don't need to realloc bitmaps */
 502	if (!init_net.genl_sock)
 503		return 0;
 504
 505	if (family->netnsok) {
 506		struct net *net;
 507
 508		netlink_table_grab();
 509		rcu_read_lock();
 510		for_each_net_rcu(net) {
 511			err = __netlink_change_ngroups(net->genl_sock,
 512					mc_groups_longs * BITS_PER_LONG);
 513			if (err) {
 514				/*
 515				 * No need to roll back, can only fail if
 516				 * memory allocation fails and then the
 517				 * number of _possible_ groups has been
 518				 * increased on some sockets which is ok.
 519				 */
 520				break;
 521			}
 522		}
 523		rcu_read_unlock();
 524		netlink_table_ungrab();
 525	} else {
 526		err = netlink_change_ngroups(init_net.genl_sock,
 527					     mc_groups_longs * BITS_PER_LONG);
 528	}
 529
 530	if (groups_allocated && err) {
 531		for (i = 0; i < family->n_mcgrps; i++)
 532			clear_bit(family->mcgrp_offset + i, mc_groups);
 533	}
 534
 535	return err;
 536}
 537
 538static void genl_unregister_mc_groups(const struct genl_family *family)
 539{
 540	struct net *net;
 541	int i;
 542
 543	netlink_table_grab();
 544	rcu_read_lock();
 545	for_each_net_rcu(net) {
 546		for (i = 0; i < family->n_mcgrps; i++)
 547			__netlink_clear_multicast_users(
 548				net->genl_sock, family->mcgrp_offset + i);
 549	}
 550	rcu_read_unlock();
 551	netlink_table_ungrab();
 552
 553	for (i = 0; i < family->n_mcgrps; i++) {
 554		int grp_id = family->mcgrp_offset + i;
 555
 556		if (grp_id != 1)
 557			clear_bit(grp_id, mc_groups);
 558		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
 559				&family->mcgrps[i], grp_id);
 560	}
 561}
 562
 563static bool genl_split_op_check(const struct genl_split_ops *op)
 564{
 565	if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
 566					  GENL_CMD_CAP_DUMP)) != 1))
 567		return true;
 568	return false;
 569}
 570
 571static int genl_validate_ops(const struct genl_family *family)
 572{
 573	struct genl_op_iter i, j;
 574	unsigned int s;
 575
 576	if (WARN_ON(family->n_ops && !family->ops) ||
 577	    WARN_ON(family->n_small_ops && !family->small_ops) ||
 578	    WARN_ON(family->n_split_ops && !family->split_ops))
 579		return -EINVAL;
 580
 581	for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
 582		if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
 583			return -EINVAL;
 584
 585		if (WARN_ON(i.cmd >= family->resv_start_op &&
 586			    (i.doit.validate || i.dumpit.validate)))
 587			return -EINVAL;
 588
 589		genl_op_iter_copy(&j, &i);
 590		while (genl_op_iter_next(&j)) {
 591			if (i.cmd == j.cmd)
 592				return -EINVAL;
 593		}
 594	}
 595
 596	if (family->n_split_ops) {
 597		if (genl_split_op_check(&family->split_ops[0]))
 598			return -EINVAL;
 599	}
 600
 601	for (s = 1; s < family->n_split_ops; s++) {
 602		const struct genl_split_ops *a, *b;
 603
 604		a = &family->split_ops[s - 1];
 605		b = &family->split_ops[s];
 606
 607		if (genl_split_op_check(b))
 608			return -EINVAL;
 609
 610		/* Check sort order */
 611		if (a->cmd < b->cmd) {
 612			continue;
 613		} else if (a->cmd > b->cmd) {
 614			WARN_ON(1);
 615			return -EINVAL;
 616		}
 617
 618		if (a->internal_flags != b->internal_flags ||
 619		    ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
 620					       GENL_CMD_CAP_DUMP))) {
 621			WARN_ON(1);
 622			return -EINVAL;
 623		}
 624
 625		if ((a->flags & GENL_CMD_CAP_DO) &&
 626		    (b->flags & GENL_CMD_CAP_DUMP))
 627			continue;
 628
 629		WARN_ON(1);
 630		return -EINVAL;
 631	}
 632
 633	return 0;
 634}
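
/*
 * Illustrative sketch, not part of genetlink.c: a split_ops array laid out to
 * pass the checks in genl_validate_ops() above -- entries sorted by cmd, each
 * entry carrying exactly one of GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP, and the
 * DO entry of a command placed directly before its DUMP entry.  All "foo_*"
 * and FOO_* identifiers are hypothetical.
 */
static const struct genl_split_ops foo_split_ops[] = {
        {
                .cmd     = FOO_CMD_GET,         /* DO entry first... */
                .doit    = foo_get_doit,
                .policy  = foo_get_policy,
                .maxattr = FOO_ATTR_MAX,
                .flags   = GENL_CMD_CAP_DO,
        },
        {
                .cmd     = FOO_CMD_GET,         /* ...then DUMP for the same cmd */
                .dumpit  = foo_get_dumpit,
                .flags   = GENL_CMD_CAP_DUMP,
        },
        {
                .cmd     = FOO_CMD_SET,         /* strictly increasing cmd values */
                .doit    = foo_set_doit,
                .policy  = foo_set_policy,
                .maxattr = FOO_ATTR_MAX,
                .flags   = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
        },
};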
 635
 636static void *genl_sk_priv_alloc(struct genl_family *family)
 637{
 638	void *priv;
 639
 640	priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
 641	if (!priv)
 642		return ERR_PTR(-ENOMEM);
 643
 644	if (family->sock_priv_init)
 645		family->sock_priv_init(priv);
 646
 647	return priv;
 648}
 649
 650static void genl_sk_priv_free(const struct genl_family *family, void *priv)
 651{
 652	if (family->sock_priv_destroy)
 653		family->sock_priv_destroy(priv);
 654	kfree(priv);
 655}
 656
 657static int genl_sk_privs_alloc(struct genl_family *family)
 658{
 659	if (!family->sock_priv_size)
 660		return 0;
 661
 662	family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
 663	if (!family->sock_privs)
 664		return -ENOMEM;
 665	xa_init(family->sock_privs);
 666	return 0;
 667}
 668
 669static void genl_sk_privs_free(const struct genl_family *family)
 670{
 671	unsigned long id;
 672	void *priv;
 673
 674	if (!family->sock_priv_size)
 675		return;
 676
 677	xa_for_each(family->sock_privs, id, priv)
 678		genl_sk_priv_free(family, priv);
 679
 680	xa_destroy(family->sock_privs);
 681	kfree(family->sock_privs);
 682}
 683
 684static void genl_sk_priv_free_by_sock(struct genl_family *family,
 685				      struct sock *sk)
 686{
 687	void *priv;
 688
 689	if (!family->sock_priv_size)
 690		return;
 691	priv = xa_erase(family->sock_privs, (unsigned long) sk);
 692	if (!priv)
 693		return;
 694	genl_sk_priv_free(family, priv);
 695}
 696
 697static void genl_release(struct sock *sk, unsigned long *groups)
 698{
 699	struct genl_family *family;
 700	unsigned int id;
 701
 702	down_read(&cb_lock);
 703
 704	idr_for_each_entry(&genl_fam_idr, family, id)
 705		genl_sk_priv_free_by_sock(family, sk);
 706
 707	up_read(&cb_lock);
 708}
 709
 710/**
 711 * __genl_sk_priv_get - Get family private pointer for socket, if exists
 712 *
 713 * @family: family
 714 * @sk: socket
 715 *
 716 * Look up the private memory for a Generic netlink family and the specified socket.
 717 *
 718 * The caller must hold the RCU read lock when calling this.
 719 *
 720 * Return: valid pointer on success, otherwise negative error value
 721 * encoded by ERR_PTR(), NULL in case priv does not exist.
 722 */
 723void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
 724{
 725	if (WARN_ON_ONCE(!family->sock_privs))
 726		return ERR_PTR(-EINVAL);
 727	return xa_load(family->sock_privs, (unsigned long) sk);
 728}
 729
 730/**
 731 * genl_sk_priv_get - Get family private pointer for socket
 732 *
 733 * @family: family
 734 * @sk: socket
 735 *
 736 * Look up the private memory for a Generic netlink family and the specified socket.
 737 * Allocate the private memory if it has not been allocated yet.
 738 *
 739 * Return: valid pointer on success, otherwise negative error value
 740 * encoded by ERR_PTR().
 741 */
 742void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
 743{
 744	void *priv, *old_priv;
 745
 746	priv = __genl_sk_priv_get(family, sk);
 747	if (priv)
 748		return priv;
 749
 750	/* priv for the family does not exist so far, create it. */
 751
 752	priv = genl_sk_priv_alloc(family);
 753	if (IS_ERR(priv))
 754		return ERR_CAST(priv);
 755
 756	old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
 757			      priv, GFP_KERNEL);
 758	if (old_priv) {
 759		genl_sk_priv_free(family, priv);
 760		if (xa_is_err(old_priv))
 761			return ERR_PTR(xa_err(old_priv));
 762		/* Race happened, priv for the socket was already inserted. */
 763		return old_priv;
 764	}
 765	return priv;
 766}
 767
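
/*
 * Illustrative sketch, not part of genetlink.c: how a family might use the
 * per-socket private storage above.  Setting ->sock_priv_size (plus optional
 * ->sock_priv_init/->sock_priv_destroy) makes genl_sk_privs_alloc() create
 * the xarray at registration time; handlers then fetch the per-socket blob
 * lazily via genl_sk_priv_get(), and genl_release() frees it when the socket
 * goes away.  All "foo_*"/FOO_* identifiers are hypothetical.
 */
struct foo_sock_priv {
        u32 notify_filter;
};

static void foo_sock_priv_init(void *priv)
{
        struct foo_sock_priv *sp = priv;

        sp->notify_filter = 0;
}

static struct genl_family foo_nl_family = {
        .name           = "foo",
        .sock_priv_size = sizeof(struct foo_sock_priv),
        .sock_priv_init = foo_sock_priv_init,
        /* ops, policy, module, etc. omitted */
};

static int foo_set_filter_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct foo_sock_priv *sp;

        if (!info->attrs[FOO_ATTR_FILTER])
                return -EINVAL;

        /* NETLINK_CB(skb).sk is the requesting socket; priv is allocated on first use */
        sp = genl_sk_priv_get(&foo_nl_family, NETLINK_CB(skb).sk);
        if (IS_ERR(sp))
                return PTR_ERR(sp);

        sp->notify_filter = nla_get_u32(info->attrs[FOO_ATTR_FILTER]);
        return 0;
}
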
 768/**
 769 * genl_register_family - register a generic netlink family
 770 * @family: generic netlink family
 771 *
 772 * Registers the specified family after validating it first. Only one
 773 * family may be registered with the same family name or identifier.
 774 *
 775 * The family's ops, multicast groups and module pointer must already
 776 * be assigned.
 777 *
 778 * Return 0 on success or a negative error code.
 779 */
 780int genl_register_family(struct genl_family *family)
 781{
 782	int err, i;
 783	int start = GENL_START_ALLOC, end = GENL_MAX_ID;
 784
 785	err = genl_validate_ops(family);
 786	if (err)
 787		return err;
 788
 789	genl_lock_all();
 790
 791	if (genl_family_find_byname(family->name)) {
 792		err = -EEXIST;
 793		goto errout_locked;
 794	}
 795
 796	err = genl_sk_privs_alloc(family);
 797	if (err)
 798		goto errout_locked;
 799
 800	/*
 801	 * Sadly, a few cases need to be special-cased
 802	 * due to them having previously abused the API
 803	 * and having used their family ID also as their
 804	 * multicast group ID, so we use reserved IDs
 805	 * for both to be sure we can do that mapping.
 806	 */
 807	if (family == &genl_ctrl) {
 808		/* and this needs to be special for initial family lookups */
 809		start = end = GENL_ID_CTRL;
 810	} else if (strcmp(family->name, "pmcraid") == 0) {
 811		start = end = GENL_ID_PMCRAID;
 812	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
 813		start = end = GENL_ID_VFS_DQUOT;
 814	}
 815
 816	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
 817				      start, end + 1, GFP_KERNEL);
 818	if (family->id < 0) {
 819		err = family->id;
 820		goto errout_sk_privs_free;
 821	}
 822
 823	err = genl_validate_assign_mc_groups(family);
 824	if (err)
 825		goto errout_remove;
 826
 827	genl_unlock_all();
 828
 829	/* send all events */
 830	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
 831	for (i = 0; i < family->n_mcgrps; i++)
 832		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
 833				&family->mcgrps[i], family->mcgrp_offset + i);
 834
 835	return 0;
 836
 837errout_remove:
 838	idr_remove(&genl_fam_idr, family->id);
 839errout_sk_privs_free:
 840	genl_sk_privs_free(family);
 841errout_locked:
 842	genl_unlock_all();
 843	return err;
 844}
 845EXPORT_SYMBOL(genl_register_family);
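
/*
 * Illustrative sketch, not part of genetlink.c: a minimal family definition
 * and its registration from module init.  All "foo_*"/FOO_* identifiers are
 * hypothetical; a real family would define its commands and attributes in a
 * uAPI header.
 */
static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
        [FOO_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = GENL_NAMSIZ - 1 },
        [FOO_ATTR_VAL]  = { .type = NLA_U32 },
};

static const struct genl_ops foo_genl_ops[] = {
        {
                .cmd    = FOO_CMD_GET,
                .doit   = foo_get_doit,
                .dumpit = foo_get_dumpit,
        },
        {
                .cmd    = FOO_CMD_SET,
                .doit   = foo_set_doit,
                .flags  = GENL_ADMIN_PERM,
        },
};

static const struct genl_multicast_group foo_mcgrps[] = {
        { .name = "events", },
};

static struct genl_family foo_nl_family = {
        .name           = "foo",
        .version        = 1,
        .maxattr        = FOO_ATTR_MAX,
        .policy         = foo_policy,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .ops            = foo_genl_ops,
        .n_ops          = ARRAY_SIZE(foo_genl_ops),
        .resv_start_op  = FOO_CMD_SET + 1,
        .mcgrps         = foo_mcgrps,
        .n_mcgrps       = ARRAY_SIZE(foo_mcgrps),
};

static int __init foo_init(void)
{
        return genl_register_family(&foo_nl_family);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
        genl_unregister_family(&foo_nl_family);
}
module_exit(foo_exit);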
 846
 847/**
 848 * genl_unregister_family - unregister generic netlink family
 849 * @family: generic netlink family
 850 *
 851 * Unregisters the specified family.
 852 *
 853 * Returns 0 on success or a negative error code.
 854 */
 855int genl_unregister_family(const struct genl_family *family)
 856{
 857	genl_lock_all();
 858
 859	if (!genl_family_find_byid(family->id)) {
 860		genl_unlock_all();
 861		return -ENOENT;
 862	}
 863
 864	genl_unregister_mc_groups(family);
 865
 866	idr_remove(&genl_fam_idr, family->id);
 867
 868	up_write(&cb_lock);
 869	wait_event(genl_sk_destructing_waitq,
 870		   atomic_read(&genl_sk_destructing_cnt) == 0);
 871
 872	genl_sk_privs_free(family);
 873
 874	genl_unlock();
 875
 876	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 877
 878	return 0;
 879}
 880EXPORT_SYMBOL(genl_unregister_family);
 881
 882/**
 883 * genlmsg_put - Add generic netlink header to netlink message
 884 * @skb: socket buffer holding the message
 885 * @portid: netlink portid the message is addressed to
 886 * @seq: sequence number (usually the one of the sender)
 887 * @family: generic netlink family
 888 * @flags: netlink message flags
 889 * @cmd: generic netlink command
 890 *
 891 * Returns a pointer to the user-specific header.
 892 */
 893void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 894		  const struct genl_family *family, int flags, u8 cmd)
 895{
 896	struct nlmsghdr *nlh;
 897	struct genlmsghdr *hdr;
 898
 899	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
 900			family->hdrsize, flags);
 901	if (nlh == NULL)
 902		return NULL;
 903
 904	hdr = nlmsg_data(nlh);
 905	hdr->cmd = cmd;
 906	hdr->version = family->version;
 907	hdr->reserved = 0;
 908
 909	return (char *) hdr + GENL_HDRLEN;
 910}
 911EXPORT_SYMBOL(genlmsg_put);
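
/*
 * Illustrative sketch, not part of genetlink.c: the usual reply pattern built
 * around genlmsg_put() in a doit handler.  "foo_*"/FOO_* names are
 * hypothetical.
 */
static int foo_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        void *hdr;

        msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        /* echo the requester's portid/seq so the reply matches the request */
        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &foo_nl_family, 0, FOO_CMD_GET);
        if (!hdr)
                goto err_free;

        if (nla_put_u32(msg, FOO_ATTR_VAL, 42))
                goto err_cancel;

        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);

err_cancel:
        genlmsg_cancel(msg, hdr);
err_free:
        nlmsg_free(msg);
        return -EMSGSIZE;
}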
 912
 913static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
 914{
 915	return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
 916}
 917
 918static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
 919{
 920	kfree(info);
 921}
 922
 923static struct nlattr **
 924genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
 925				struct nlmsghdr *nlh,
 926				struct netlink_ext_ack *extack,
 927				const struct genl_split_ops *ops,
 928				int hdrlen,
 929				enum genl_validate_flags no_strict_flag)
 930{
 931	enum netlink_validation validate = ops->validate & no_strict_flag ?
 932					   NL_VALIDATE_LIBERAL :
 933					   NL_VALIDATE_STRICT;
 934	struct nlattr **attrbuf;
 935	int err;
 936
 937	if (!ops->maxattr)
 938		return NULL;
 939
 940	attrbuf = kmalloc_array(ops->maxattr + 1,
 941				sizeof(struct nlattr *), GFP_KERNEL);
 942	if (!attrbuf)
 943		return ERR_PTR(-ENOMEM);
 944
 945	err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
 946			    validate, extack);
 947	if (err) {
 948		kfree(attrbuf);
 949		return ERR_PTR(err);
 950	}
 951	return attrbuf;
 952}
 953
 954static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
 955{
 956	kfree(attrbuf);
 957}
 958
 959struct genl_start_context {
 960	const struct genl_family *family;
 961	struct nlmsghdr *nlh;
 962	struct netlink_ext_ack *extack;
 963	const struct genl_split_ops *ops;
 964	int hdrlen;
 965};
 966
 967static int genl_start(struct netlink_callback *cb)
 968{
 969	struct genl_start_context *ctx = cb->data;
 970	const struct genl_split_ops *ops;
 971	struct genl_dumpit_info *info;
 972	struct nlattr **attrs = NULL;
 973	int rc = 0;
 974
 975	ops = ctx->ops;
 976	if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
 977	    ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
 978		return -EINVAL;
 979
 980	attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
 981						ops, ctx->hdrlen,
 982						GENL_DONT_VALIDATE_DUMP_STRICT);
 983	if (IS_ERR(attrs))
 984		return PTR_ERR(attrs);
 985
 986	info = genl_dumpit_info_alloc();
 987	if (!info) {
 988		genl_family_rcv_msg_attrs_free(attrs);
 989		return -ENOMEM;
 990	}
 991	info->op = *ops;
 992	info->info.family	= ctx->family;
 993	info->info.snd_seq	= cb->nlh->nlmsg_seq;
 994	info->info.snd_portid	= NETLINK_CB(cb->skb).portid;
 995	info->info.nlhdr	= cb->nlh;
 996	info->info.genlhdr	= nlmsg_data(cb->nlh);
 997	info->info.attrs	= attrs;
 998	genl_info_net_set(&info->info, sock_net(cb->skb->sk));
 999	info->info.extack	= cb->extack;
1000	memset(&info->info.ctx, 0, sizeof(info->info.ctx));
1001
1002	cb->data = info;
1003	if (ops->start) {
1004		genl_op_lock(ctx->family);
1005		rc = ops->start(cb);
1006		genl_op_unlock(ctx->family);
1007	}
1008
1009	if (rc) {
1010		genl_family_rcv_msg_attrs_free(info->info.attrs);
1011		genl_dumpit_info_free(info);
1012		cb->data = NULL;
1013	}
1014	return rc;
1015}
1016
1017static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1018{
1019	struct genl_dumpit_info *dump_info = cb->data;
1020	const struct genl_split_ops *ops = &dump_info->op;
1021	struct genl_info *info = &dump_info->info;
1022	int rc;
1023
1024	info->extack = cb->extack;
1025
1026	genl_op_lock(info->family);
1027	rc = ops->dumpit(skb, cb);
1028	genl_op_unlock(info->family);
1029	return rc;
1030}
1031
1032static int genl_done(struct netlink_callback *cb)
1033{
1034	struct genl_dumpit_info *dump_info = cb->data;
1035	const struct genl_split_ops *ops = &dump_info->op;
1036	struct genl_info *info = &dump_info->info;
1037	int rc = 0;
1038
1039	info->extack = cb->extack;
1040
1041	if (ops->done) {
1042		genl_op_lock(info->family);
1043		rc = ops->done(cb);
1044		genl_op_unlock(info->family);
1045	}
1046	genl_family_rcv_msg_attrs_free(info->attrs);
1047	genl_dumpit_info_free(dump_info);
1048	return rc;
1049}
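
/*
 * Illustrative sketch, not part of genetlink.c: a family dumpit handler as
 * driven by genl_start()/genl_dumpit()/genl_done() above.  The attributes
 * were already parsed into the genl_dumpit_info kept in cb->data, and
 * cb->args[] carries the resume point between messages.  "foo_*"/FOO_*
 * names and the foo_object_*() helpers are hypothetical.
 */
static int foo_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        const struct genl_dumpit_info *info = genl_dumpit_info(cb);
        struct nlattr **attrs = info->info.attrs;       /* parsed by genl_start() */
        unsigned long idx = cb->args[0];                /* next object to dump */
        void *hdr;

        for (; idx < foo_object_count(); idx++) {
                if (attrs[FOO_ATTR_FILTER] &&
                    nla_get_u32(attrs[FOO_ATTR_FILTER]) != foo_object_val(idx))
                        continue;

                hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, &foo_nl_family,
                                  NLM_F_MULTI, FOO_CMD_GET);
                if (!hdr)
                        break;          /* skb full, resume from idx next time */
                if (nla_put_u32(skb, FOO_ATTR_VAL, foo_object_val(idx))) {
                        genlmsg_cancel(skb, hdr);
                        break;
                }
                genlmsg_end(skb, hdr);
        }

        cb->args[0] = idx;
        return skb->len;        /* 0 tells netlink the dump is complete */
}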
1050
1051static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
1052				      struct sk_buff *skb,
1053				      struct nlmsghdr *nlh,
1054				      struct netlink_ext_ack *extack,
1055				      const struct genl_split_ops *ops,
1056				      int hdrlen, struct net *net)
1057{
1058	struct genl_start_context ctx;
1059	struct netlink_dump_control c = {
1060		.module = family->module,
1061		.data = &ctx,
1062		.start = genl_start,
1063		.dump = genl_dumpit,
1064		.done = genl_done,
1065		.extack = extack,
1066	};
1067	int err;
1068
1069	ctx.family = family;
1070	ctx.nlh = nlh;
1071	ctx.extack = extack;
1072	ctx.ops = ops;
1073	ctx.hdrlen = hdrlen;
1074
1075	genl_op_unlock(family);
1076	err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1077	genl_op_lock(family);
1078
1079	return err;
1080}
1081
1082static int genl_family_rcv_msg_doit(const struct genl_family *family,
1083				    struct sk_buff *skb,
1084				    struct nlmsghdr *nlh,
1085				    struct netlink_ext_ack *extack,
1086				    const struct genl_split_ops *ops,
1087				    int hdrlen, struct net *net)
1088{
1089	struct nlattr **attrbuf;
1090	struct genl_info info;
1091	int err;
1092
1093	attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
1094						  ops, hdrlen,
1095						  GENL_DONT_VALIDATE_STRICT);
1096	if (IS_ERR(attrbuf))
1097		return PTR_ERR(attrbuf);
1098
1099	info.snd_seq = nlh->nlmsg_seq;
1100	info.snd_portid = NETLINK_CB(skb).portid;
1101	info.family = family;
1102	info.nlhdr = nlh;
1103	info.genlhdr = nlmsg_data(nlh);
1104	info.attrs = attrbuf;
1105	info.extack = extack;
1106	genl_info_net_set(&info, net);
1107	memset(&info.ctx, 0, sizeof(info.ctx));
1108
1109	if (ops->pre_doit) {
1110		err = ops->pre_doit(ops, skb, &info);
1111		if (err)
1112			goto out;
1113	}
1114
1115	err = ops->doit(skb, &info);
1116
1117	if (ops->post_doit)
1118		ops->post_doit(ops, skb, &info);
1119
1120out:
1121	genl_family_rcv_msg_attrs_free(attrbuf);
1122
1123	return err;
1124}
1125
1126static int genl_header_check(const struct genl_family *family,
1127			     struct nlmsghdr *nlh, struct genlmsghdr *hdr,
1128			     struct netlink_ext_ack *extack)
1129{
1130	u16 flags;
1131
1132	/* Only for commands added after we started validating */
1133	if (hdr->cmd < family->resv_start_op)
1134		return 0;
1135
1136	if (hdr->reserved) {
1137		NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
1138		return -EINVAL;
1139	}
1140
1141	/* Old netlink flags have pretty loose semantics, allow only the flags
1142	 * consumed by the core where we can enforce the meaning.
1143	 */
1144	flags = nlh->nlmsg_flags;
1145	if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
1146		flags &= ~NLM_F_DUMP;
1147	if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
1148		NL_SET_ERR_MSG(extack,
1149			       "ambiguous or reserved bits set in nlmsg_flags");
1150		return -EINVAL;
1151	}
1152
1153	return 0;
1154}
1155
1156static int genl_family_rcv_msg(const struct genl_family *family,
1157			       struct sk_buff *skb,
1158			       struct nlmsghdr *nlh,
1159			       struct netlink_ext_ack *extack)
1160{
1161	struct net *net = sock_net(skb->sk);
1162	struct genlmsghdr *hdr = nlmsg_data(nlh);
1163	struct genl_split_ops op;
1164	int hdrlen;
1165	u8 flags;
1166
1167	/* this family doesn't exist in this netns */
1168	if (!family->netnsok && !net_eq(net, &init_net))
1169		return -ENOENT;
1170
1171	hdrlen = GENL_HDRLEN + family->hdrsize;
1172	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
1173		return -EINVAL;
1174
1175	if (genl_header_check(family, nlh, hdr, extack))
1176		return -EINVAL;
1177
1178	flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
1179		GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
1180	if (genl_get_cmd(hdr->cmd, flags, family, &op))
1181		return -EOPNOTSUPP;
1182
1183	if ((op.flags & GENL_ADMIN_PERM) &&
1184	    !netlink_capable(skb, CAP_NET_ADMIN))
1185		return -EPERM;
1186
1187	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
1188	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1189		return -EPERM;
1190
1191	if (flags & GENL_CMD_CAP_DUMP)
1192		return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
1193						  &op, hdrlen, net);
1194	else
1195		return genl_family_rcv_msg_doit(family, skb, nlh, extack,
1196						&op, hdrlen, net);
1197}
1198
1199static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1200			struct netlink_ext_ack *extack)
1201{
1202	const struct genl_family *family;
1203	int err;
1204
1205	family = genl_family_find_byid(nlh->nlmsg_type);
1206	if (family == NULL)
1207		return -ENOENT;
1208
1209	genl_op_lock(family);
1210	err = genl_family_rcv_msg(family, skb, nlh, extack);
1211	genl_op_unlock(family);
1212
1213	return err;
1214}
1215
1216static void genl_rcv(struct sk_buff *skb)
1217{
1218	down_read(&cb_lock);
1219	netlink_rcv_skb(skb, &genl_rcv_msg);
1220	up_read(&cb_lock);
1221}
1222
1223/**************************************************************************
1224 * Controller
1225 **************************************************************************/
1226
1227static struct genl_family genl_ctrl;
1228
1229static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
1230			  u32 flags, struct sk_buff *skb, u8 cmd)
1231{
1232	struct genl_op_iter i;
1233	void *hdr;
1234
1235	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1236	if (hdr == NULL)
1237		return -EMSGSIZE;
1238
1239	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1240	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
1241	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
1242	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
1243	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
1244		goto nla_put_failure;
1245
1246	if (genl_op_iter_init(family, &i)) {
1247		struct nlattr *nla_ops;
1248
1249		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
1250		if (nla_ops == NULL)
1251			goto nla_put_failure;
1252
1253		while (genl_op_iter_next(&i)) {
1254			struct nlattr *nest;
1255			u32 op_flags;
1256
1257			op_flags = i.flags;
1258			if (i.doit.policy || i.dumpit.policy)
1259				op_flags |= GENL_CMD_CAP_HASPOL;
1260
1261			nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
1262			if (nest == NULL)
1263				goto nla_put_failure;
1264
1265			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
1266			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
1267				goto nla_put_failure;
1268
1269			nla_nest_end(skb, nest);
1270		}
1271
1272		nla_nest_end(skb, nla_ops);
1273	}
1274
1275	if (family->n_mcgrps) {
1276		struct nlattr *nla_grps;
1277		int i;
1278
1279		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1280		if (nla_grps == NULL)
1281			goto nla_put_failure;
1282
1283		for (i = 0; i < family->n_mcgrps; i++) {
1284			struct nlattr *nest;
1285			const struct genl_multicast_group *grp;
1286
1287			grp = &family->mcgrps[i];
1288
1289			nest = nla_nest_start_noflag(skb, i + 1);
1290			if (nest == NULL)
1291				goto nla_put_failure;
1292
1293			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
1294					family->mcgrp_offset + i) ||
1295			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1296					   grp->name))
1297				goto nla_put_failure;
1298
1299			nla_nest_end(skb, nest);
1300		}
1301		nla_nest_end(skb, nla_grps);
1302	}
1303
1304	genlmsg_end(skb, hdr);
1305	return 0;
1306
1307nla_put_failure:
1308	genlmsg_cancel(skb, hdr);
1309	return -EMSGSIZE;
1310}
1311
1312static int ctrl_fill_mcgrp_info(const struct genl_family *family,
1313				const struct genl_multicast_group *grp,
1314				int grp_id, u32 portid, u32 seq, u32 flags,
1315				struct sk_buff *skb, u8 cmd)
1316{
1317	void *hdr;
1318	struct nlattr *nla_grps;
1319	struct nlattr *nest;
1320
1321	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1322	if (hdr == NULL)
1323		return -1;
1324
1325	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1326	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
1327		goto nla_put_failure;
1328
1329	nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1330	if (nla_grps == NULL)
1331		goto nla_put_failure;
1332
1333	nest = nla_nest_start_noflag(skb, 1);
1334	if (nest == NULL)
1335		goto nla_put_failure;
1336
1337	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
1338	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1339			   grp->name))
1340		goto nla_put_failure;
1341
1342	nla_nest_end(skb, nest);
1343	nla_nest_end(skb, nla_grps);
1344
1345	genlmsg_end(skb, hdr);
1346	return 0;
1347
1348nla_put_failure:
1349	genlmsg_cancel(skb, hdr);
1350	return -EMSGSIZE;
1351}
1352
1353static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
1354{
1355	int n = 0;
1356	struct genl_family *rt;
1357	struct net *net = sock_net(skb->sk);
1358	int fams_to_skip = cb->args[0];
1359	unsigned int id;
1360	int err = 0;
1361
1362	idr_for_each_entry(&genl_fam_idr, rt, id) {
1363		if (!rt->netnsok && !net_eq(net, &init_net))
1364			continue;
1365
1366		if (n++ < fams_to_skip)
1367			continue;
1368
1369		err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
1370				     cb->nlh->nlmsg_seq, NLM_F_MULTI,
1371				     skb, CTRL_CMD_NEWFAMILY);
1372		if (err) {
1373			n--;
1374			break;
1375		}
1376	}
1377
1378	cb->args[0] = n;
1379	return err;
1380}
1381
1382static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
1383					     u32 portid, int seq, u8 cmd)
1384{
1385	struct sk_buff *skb;
1386	int err;
1387
1388	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1389	if (skb == NULL)
1390		return ERR_PTR(-ENOBUFS);
1391
1392	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
1393	if (err < 0) {
1394		nlmsg_free(skb);
1395		return ERR_PTR(err);
1396	}
1397
1398	return skb;
1399}
1400
1401static struct sk_buff *
1402ctrl_build_mcgrp_msg(const struct genl_family *family,
1403		     const struct genl_multicast_group *grp,
1404		     int grp_id, u32 portid, int seq, u8 cmd)
1405{
1406	struct sk_buff *skb;
1407	int err;
1408
1409	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1410	if (skb == NULL)
1411		return ERR_PTR(-ENOBUFS);
1412
1413	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
1414				   seq, 0, skb, cmd);
1415	if (err < 0) {
1416		nlmsg_free(skb);
1417		return ERR_PTR(err);
1418	}
1419
1420	return skb;
1421}
1422
1423static const struct nla_policy ctrl_policy_family[] = {
1424	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
1425	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
1426				    .len = GENL_NAMSIZ - 1 },
1427};
1428
1429static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
1430{
1431	struct sk_buff *msg;
1432	const struct genl_family *res = NULL;
1433	int err = -EINVAL;
1434
1435	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
1436		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
1437		res = genl_family_find_byid(id);
1438		err = -ENOENT;
1439	}
1440
1441	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
1442		char *name;
1443
1444		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
1445		res = genl_family_find_byname(name);
1446#ifdef CONFIG_MODULES
1447		if (res == NULL) {
1448			genl_unlock();
1449			up_read(&cb_lock);
1450			request_module("net-pf-%d-proto-%d-family-%s",
1451				       PF_NETLINK, NETLINK_GENERIC, name);
1452			down_read(&cb_lock);
1453			genl_lock();
1454			res = genl_family_find_byname(name);
1455		}
1456#endif
1457		err = -ENOENT;
1458	}
1459
1460	if (res == NULL)
1461		return err;
1462
1463	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
1464		/* family doesn't exist here */
1465		return -ENOENT;
1466	}
1467
1468	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
1469				    CTRL_CMD_NEWFAMILY);
1470	if (IS_ERR(msg))
1471		return PTR_ERR(msg);
1472
1473	return genlmsg_reply(msg, info);
1474}
1475
1476static int genl_ctrl_event(int event, const struct genl_family *family,
1477			   const struct genl_multicast_group *grp,
1478			   int grp_id)
1479{
1480	struct sk_buff *msg;
1481
1482	/* genl is still initialising */
1483	if (!init_net.genl_sock)
1484		return 0;
1485
1486	switch (event) {
1487	case CTRL_CMD_NEWFAMILY:
1488	case CTRL_CMD_DELFAMILY:
1489		WARN_ON(grp);
1490		msg = ctrl_build_family_msg(family, 0, 0, event);
1491		break;
1492	case CTRL_CMD_NEWMCAST_GRP:
1493	case CTRL_CMD_DELMCAST_GRP:
1494		BUG_ON(!grp);
1495		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
1496		break;
1497	default:
1498		return -EINVAL;
1499	}
1500
1501	if (IS_ERR(msg))
1502		return PTR_ERR(msg);
1503
1504	if (!family->netnsok)
1505		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
1506					0, GFP_KERNEL);
1507	else
1508		genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
1509
1510	return 0;
1511}
1512
1513struct ctrl_dump_policy_ctx {
1514	struct netlink_policy_dump_state *state;
1515	const struct genl_family *rt;
1516	struct genl_op_iter *op_iter;
1517	u32 op;
1518	u16 fam_id;
1519	u8 dump_map:1,
1520	   single_op:1;
1521};
1522
1523static const struct nla_policy ctrl_policy_policy[] = {
1524	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
1525	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
1526				    .len = GENL_NAMSIZ - 1 },
1527	[CTRL_ATTR_OP]		= { .type = NLA_U32 },
1528};
1529
1530static int ctrl_dumppolicy_start(struct netlink_callback *cb)
1531{
1532	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
1533	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1534	struct nlattr **tb = info->info.attrs;
1535	const struct genl_family *rt;
1536	struct genl_op_iter i;
1537	int err;
1538
1539	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
1540
1541	if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
1542		return -EINVAL;
1543
1544	if (tb[CTRL_ATTR_FAMILY_ID]) {
1545		ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
1546	} else {
1547		rt = genl_family_find_byname(
1548			nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
1549		if (!rt)
1550			return -ENOENT;
1551		ctx->fam_id = rt->id;
1552	}
1553
1554	rt = genl_family_find_byid(ctx->fam_id);
1555	if (!rt)
1556		return -ENOENT;
1557
1558	ctx->rt = rt;
1559
1560	if (tb[CTRL_ATTR_OP]) {
1561		struct genl_split_ops doit, dump;
1562
1563		ctx->single_op = true;
1564		ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
1565
1566		err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
1567		if (err) {
1568			NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
1569			return err;
1570		}
1571
1572		if (doit.policy) {
1573			err = netlink_policy_dump_add_policy(&ctx->state,
1574							     doit.policy,
1575							     doit.maxattr);
1576			if (err)
1577				goto err_free_state;
1578		}
1579		if (dump.policy) {
1580			err = netlink_policy_dump_add_policy(&ctx->state,
1581							     dump.policy,
1582							     dump.maxattr);
1583			if (err)
1584				goto err_free_state;
1585		}
1586
1587		if (!ctx->state)
1588			return -ENODATA;
1589
1590		ctx->dump_map = 1;
1591		return 0;
1592	}
1593
1594	ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
1595	if (!ctx->op_iter)
1596		return -ENOMEM;
1597
1598	genl_op_iter_init(rt, ctx->op_iter);
1599	ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1600
1601	for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
1602		if (i.doit.policy) {
1603			err = netlink_policy_dump_add_policy(&ctx->state,
1604							     i.doit.policy,
1605							     i.doit.maxattr);
1606			if (err)
1607				goto err_free_state;
1608		}
1609		if (i.dumpit.policy) {
1610			err = netlink_policy_dump_add_policy(&ctx->state,
1611							     i.dumpit.policy,
1612							     i.dumpit.maxattr);
1613			if (err)
1614				goto err_free_state;
1615		}
1616	}
1617
1618	if (!ctx->state) {
1619		err = -ENODATA;
1620		goto err_free_op_iter;
1621	}
1622	return 0;
1623
1624err_free_state:
1625	netlink_policy_dump_free(ctx->state);
1626err_free_op_iter:
1627	kfree(ctx->op_iter);
1628	return err;
1629}
1630
1631static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
1632				  struct netlink_callback *cb)
1633{
1634	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1635	void *hdr;
1636
1637	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1638			  cb->nlh->nlmsg_seq, &genl_ctrl,
1639			  NLM_F_MULTI, CTRL_CMD_GETPOLICY);
1640	if (!hdr)
1641		return NULL;
1642
1643	if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
1644		return NULL;
1645
1646	return hdr;
1647}
1648
1649static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
1650				  struct netlink_callback *cb,
1651				  struct genl_split_ops *doit,
1652				  struct genl_split_ops *dumpit)
1653{
1654	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1655	struct nlattr *nest_pol, *nest_op;
1656	void *hdr;
1657	int idx;
1658
1659	/* skip if we have nothing to show */
1660	if (!doit->policy && !dumpit->policy)
1661		return 0;
1662
1663	hdr = ctrl_dumppolicy_prep(skb, cb);
1664	if (!hdr)
1665		return -ENOBUFS;
1666
1667	nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
1668	if (!nest_pol)
1669		goto err;
1670
1671	nest_op = nla_nest_start(skb, doit->cmd);
1672	if (!nest_op)
1673		goto err;
1674
1675	if (doit->policy) {
1676		idx = netlink_policy_dump_get_policy_idx(ctx->state,
1677							 doit->policy,
1678							 doit->maxattr);
1679
1680		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
1681			goto err;
1682	}
1683	if (dumpit->policy) {
1684		idx = netlink_policy_dump_get_policy_idx(ctx->state,
1685							 dumpit->policy,
1686							 dumpit->maxattr);
1687
1688		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
1689			goto err;
1690	}
1691
1692	nla_nest_end(skb, nest_op);
1693	nla_nest_end(skb, nest_pol);
1694	genlmsg_end(skb, hdr);
1695
1696	return 0;
1697err:
1698	genlmsg_cancel(skb, hdr);
1699	return -ENOBUFS;
1700}
1701
1702static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
1703{
1704	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1705	void *hdr;
1706
1707	if (ctx->dump_map) {
1708		if (ctx->single_op) {
1709			struct genl_split_ops doit, dumpit;
1710
1711			if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
1712						      &doit, &dumpit)))
1713				return -ENOENT;
1714
1715			if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
1716				return skb->len;
1717
1718			/* done with the per-op policy index list */
1719			ctx->dump_map = 0;
1720		}
1721
1722		while (ctx->dump_map) {
1723			if (ctrl_dumppolicy_put_op(skb, cb,
1724						   &ctx->op_iter->doit,
1725						   &ctx->op_iter->dumpit))
1726				return skb->len;
1727
1728			ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1729		}
1730	}
1731
1732	while (netlink_policy_dump_loop(ctx->state)) {
1733		struct nlattr *nest;
1734
1735		hdr = ctrl_dumppolicy_prep(skb, cb);
1736		if (!hdr)
1737			goto nla_put_failure;
1738
1739		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
1740		if (!nest)
1741			goto nla_put_failure;
1742
1743		if (netlink_policy_dump_write(skb, ctx->state))
1744			goto nla_put_failure;
1745
1746		nla_nest_end(skb, nest);
1747
1748		genlmsg_end(skb, hdr);
1749	}
1750
1751	return skb->len;
1752
1753nla_put_failure:
1754	genlmsg_cancel(skb, hdr);
1755	return skb->len;
1756}
1757
1758static int ctrl_dumppolicy_done(struct netlink_callback *cb)
1759{
1760	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1761
1762	kfree(ctx->op_iter);
1763	netlink_policy_dump_free(ctx->state);
1764	return 0;
1765}
1766
1767static const struct genl_split_ops genl_ctrl_ops[] = {
1768	{
1769		.cmd		= CTRL_CMD_GETFAMILY,
1770		.validate	= GENL_DONT_VALIDATE_STRICT,
1771		.policy		= ctrl_policy_family,
1772		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
1773		.doit		= ctrl_getfamily,
1774		.flags		= GENL_CMD_CAP_DO,
1775	},
1776	{
1777		.cmd		= CTRL_CMD_GETFAMILY,
1778		.validate	= GENL_DONT_VALIDATE_DUMP,
1779		.policy		= ctrl_policy_family,
1780		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
1781		.dumpit		= ctrl_dumpfamily,
1782		.flags		= GENL_CMD_CAP_DUMP,
1783	},
1784	{
1785		.cmd		= CTRL_CMD_GETPOLICY,
1786		.policy		= ctrl_policy_policy,
1787		.maxattr	= ARRAY_SIZE(ctrl_policy_policy) - 1,
1788		.start		= ctrl_dumppolicy_start,
1789		.dumpit		= ctrl_dumppolicy,
1790		.done		= ctrl_dumppolicy_done,
1791		.flags		= GENL_CMD_CAP_DUMP,
1792	},
1793};
1794
1795static const struct genl_multicast_group genl_ctrl_groups[] = {
1796	{ .name = "notify", },
1797};
1798
1799static struct genl_family genl_ctrl __ro_after_init = {
1800	.module = THIS_MODULE,
1801	.split_ops = genl_ctrl_ops,
1802	.n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
1803	.resv_start_op = CTRL_CMD_GETPOLICY + 1,
1804	.mcgrps = genl_ctrl_groups,
1805	.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
1806	.id = GENL_ID_CTRL,
1807	.name = "nlctrl",
1808	.version = 0x2,
1809	.netnsok = true,
1810};
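
/*
 * Illustrative user-space sketch, not part of genetlink.c: how the "nlctrl"
 * controller family above is normally consumed.  Assumes libnl-3's genl
 * helpers; genl_ctrl_resolve() sends CTRL_CMD_GETFAMILY and parses the
 * CTRL_ATTR_* reply that ctrl_fill_info() builds.  The "foo" family name is
 * hypothetical.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int resolve_foo_family_id(void)
{
        struct nl_sock *sk;
        int family = -1;

        sk = nl_socket_alloc();
        if (!sk)
                return -1;

        if (genl_connect(sk) == 0)      /* opens a NETLINK_GENERIC socket */
                /* the numeric id is dynamic, assigned by genl_register_family() */
                family = genl_ctrl_resolve(sk, "foo");

        nl_socket_free(sk);
        return family;                  /* negative libnl error code on failure */
}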
1811
1812static int genl_bind(struct net *net, int group)
1813{
1814	const struct genl_family *family;
1815	unsigned int id;
1816	int ret = 0;
1817
1818	down_read(&cb_lock);
1819
1820	idr_for_each_entry(&genl_fam_idr, family, id) {
1821		const struct genl_multicast_group *grp;
1822		int i;
1823
1824		if (family->n_mcgrps == 0)
1825			continue;
1826
1827		i = group - family->mcgrp_offset;
1828		if (i < 0 || i >= family->n_mcgrps)
1829			continue;
1830
1831		grp = &family->mcgrps[i];
1832		if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
1833		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
1834			ret = -EPERM;
1835		if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
1836		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
1837			ret = -EPERM;
1838
1839		if (family->bind)
1840			family->bind(i);
1841
1842		break;
1843	}
1844
1845	up_read(&cb_lock);
1846	return ret;
1847}
1848
1849static void genl_unbind(struct net *net, int group)
1850{
1851	const struct genl_family *family;
1852	unsigned int id;
1853
1854	down_read(&cb_lock);
1855
1856	idr_for_each_entry(&genl_fam_idr, family, id) {
1857		int i;
1858
1859		if (family->n_mcgrps == 0)
1860			continue;
1861
1862		i = group - family->mcgrp_offset;
1863		if (i < 0 || i >= family->n_mcgrps)
1864			continue;
1865
1866		if (family->unbind)
1867			family->unbind(i);
1868
1869		break;
1870	}
1871
1872	up_read(&cb_lock);
1873}
1874
1875static int __net_init genl_pernet_init(struct net *net)
1876{
1877	struct netlink_kernel_cfg cfg = {
1878		.input		= genl_rcv,
1879		.flags		= NL_CFG_F_NONROOT_RECV,
1880		.bind		= genl_bind,
1881		.unbind		= genl_unbind,
1882		.release	= genl_release,
1883	};
1884
1885	/* we'll bump the group number right afterwards */
1886	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1887
1888	if (!net->genl_sock && net_eq(net, &init_net))
1889		panic("GENL: Cannot initialize generic netlink\n");
1890
1891	if (!net->genl_sock)
1892		return -ENOMEM;
1893
1894	return 0;
1895}
1896
1897static void __net_exit genl_pernet_exit(struct net *net)
1898{
1899	netlink_kernel_release(net->genl_sock);
1900	net->genl_sock = NULL;
1901}
1902
1903static struct pernet_operations genl_pernet_ops = {
1904	.init = genl_pernet_init,
1905	.exit = genl_pernet_exit,
1906};
1907
1908static int __init genl_init(void)
1909{
1910	int err;
1911
1912	err = genl_register_family(&genl_ctrl);
1913	if (err < 0)
1914		goto problem;
1915
1916	err = register_pernet_subsys(&genl_pernet_ops);
1917	if (err)
1918		goto problem;
1919
1920	return 0;
1921
1922problem:
1923	panic("GENL: Cannot register controller: %d\n", err);
1924}
1925
1926core_initcall(genl_init);
1927
1928static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
1929{
1930	struct sk_buff *tmp;
1931	struct net *net, *prev = NULL;
1932	bool delivered = false;
1933	int err;
1934
1935	rcu_read_lock();
1936	for_each_net_rcu(net) {
1937		if (prev) {
1938			tmp = skb_clone(skb, GFP_ATOMIC);
1939			if (!tmp) {
1940				err = -ENOMEM;
1941				goto error;
1942			}
1943			err = nlmsg_multicast(prev->genl_sock, tmp,
1944					      portid, group, GFP_ATOMIC);
1945			if (!err)
1946				delivered = true;
1947			else if (err != -ESRCH)
1948				goto error;
1949		}
1950
1951		prev = net;
1952	}
1953	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
1954
1955	rcu_read_unlock();
1956
1957	if (!err)
1958		delivered = true;
1959	else if (err != -ESRCH)
1960		return err;
1961	return delivered ? 0 : -ESRCH;
1962 error:
1963	rcu_read_unlock();
1964
1965	kfree_skb(skb);
1966	return err;
1967}
1968
1969int genlmsg_multicast_allns(const struct genl_family *family,
1970			    struct sk_buff *skb, u32 portid,
1971			    unsigned int group)
1972{
1973	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1974		return -EINVAL;
1975
1976	group = family->mcgrp_offset + group;
1977	return genlmsg_mcast(skb, portid, group);
1978}
1979EXPORT_SYMBOL(genlmsg_multicast_allns);
1980
1981void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1982		 struct genl_info *info, u32 group, gfp_t flags)
1983{
1984	struct net *net = genl_info_net(info);
1985	struct sock *sk = net->genl_sock;
1986
1987	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1988		return;
1989
1990	group = family->mcgrp_offset + group;
1991	nlmsg_notify(sk, skb, info->snd_portid, group,
1992		     nlmsg_report(info->nlhdr), flags);
1993}
1994EXPORT_SYMBOL(genl_notify);
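
/*
 * Illustrative sketch, not part of genetlink.c: emitting an event on a
 * family's multicast group.  The group index is the family-local one
 * (0 .. n_mcgrps - 1); genlmsg_multicast() and genl_notify() translate it by
 * adding mcgrp_offset before handing the skb to netlink.  "foo_*"/FOO_*
 * names are hypothetical.
 */
static int foo_notify_val(u32 val)
{
        struct sk_buff *msg;
        void *hdr;

        msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        hdr = genlmsg_put(msg, 0, 0, &foo_nl_family, 0, FOO_CMD_NOTIFY);
        if (!hdr)
                goto err_free;
        if (nla_put_u32(msg, FOO_ATTR_VAL, val))
                goto err_cancel;
        genlmsg_end(msg, hdr);

        /* group 0 here is foo_mcgrps[0] ("events"), not global group 0 */
        return genlmsg_multicast(&foo_nl_family, msg, 0, 0, GFP_KERNEL);

err_cancel:
        genlmsg_cancel(msg, hdr);
err_free:
        nlmsg_free(msg);
        return -EMSGSIZE;
}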
v4.6
 
   1/*
   2 * NETLINK      Generic Netlink Family
   3 *
   4 * 		Authors:	Jamal Hadi Salim
   5 * 				Thomas Graf <tgraf@suug.ch>
   6 *				Johannes Berg <johannes@sipsolutions.net>
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/kernel.h>
  11#include <linux/slab.h>
  12#include <linux/errno.h>
  13#include <linux/types.h>
  14#include <linux/socket.h>
  15#include <linux/string.h>
  16#include <linux/skbuff.h>
  17#include <linux/mutex.h>
  18#include <linux/bitmap.h>
  19#include <linux/rwsem.h>
 
  20#include <net/sock.h>
  21#include <net/genetlink.h>
  22
 
 
  23static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
  24static DECLARE_RWSEM(cb_lock);
  25
  26atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
  27DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
  28
  29void genl_lock(void)
  30{
  31	mutex_lock(&genl_mutex);
  32}
  33EXPORT_SYMBOL(genl_lock);
  34
  35void genl_unlock(void)
  36{
  37	mutex_unlock(&genl_mutex);
  38}
  39EXPORT_SYMBOL(genl_unlock);
  40
  41#ifdef CONFIG_LOCKDEP
  42bool lockdep_genl_is_held(void)
  43{
  44	return lockdep_is_held(&genl_mutex);
  45}
  46EXPORT_SYMBOL(lockdep_genl_is_held);
  47#endif
  48
  49static void genl_lock_all(void)
  50{
  51	down_write(&cb_lock);
  52	genl_lock();
  53}
  54
  55static void genl_unlock_all(void)
  56{
  57	genl_unlock();
  58	up_write(&cb_lock);
  59}
  60
  61#define GENL_FAM_TAB_SIZE	16
  62#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)
 
 
 
 
 
 
 
 
 
 
 
  63
  64static struct list_head family_ht[GENL_FAM_TAB_SIZE];
  65/*
  66 * Bitmap of multicast groups that are currently in use.
  67 *
  68 * To avoid an allocation at boot of just one unsigned long,
  69 * declare it global instead.
  70 * Bit 0 is marked as already used since group 0 is invalid.
  71 * Bit 1 is marked as already used since the drop-monitor code
  72 * abuses the API and thinks it can statically use group 1.
  73 * That group will typically conflict with other groups that
  74 * any proper users use.
  75 * Bit 16 is marked as used since it's used for generic netlink
  76 * and the code no longer marks pre-reserved IDs as used.
  77 * Bit 17 is marked as already used since the VFS quota code
  78 * also abused this API and relied on family == group ID, we
  79 * cater to that by giving it a static family and group ID.
  80 * Bit 18 is marked as already used since the PMCRAID driver
  81 * did the same thing as the VFS quota code (maybe copied?)
  82 */
  83static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
  84				      BIT(GENL_ID_VFS_DQUOT) |
  85				      BIT(GENL_ID_PMCRAID);
  86static unsigned long *mc_groups = &mc_group_start;
  87static unsigned long mc_groups_longs = 1;
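/*
 * Worked example (illustrative): with the reservations above, the initial
 * bitmap word is 0x3 | BIT(16) | BIT(17) | BIT(18) == 0x00070003, i.e.
 * bits 0, 1, 16, 17 and 18 start out set, so the lowest group id that
 * genl_allocate_reserve_groups() below can hand to an ordinary family is 2
 * (assuming BITS_PER_LONG is at least 32).
 */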
  88
  89static int genl_ctrl_event(int event, struct genl_family *family,
  90			   const struct genl_multicast_group *grp,
  91			   int grp_id);
  92
  93static inline unsigned int genl_family_hash(unsigned int id)
  94{
  95	return id & GENL_FAM_TAB_MASK;
  96}
  97
  98static inline struct list_head *genl_family_chain(unsigned int id)
  99{
 100	return &family_ht[genl_family_hash(id)];
 101}
 102
 103static struct genl_family *genl_family_find_byid(unsigned int id)
 104{
 105	struct genl_family *f;
 106
 107	list_for_each_entry(f, genl_family_chain(id), family_list)
 108		if (f->id == id)
 109			return f;
 110
 111	return NULL;
 112}
 113
 114static struct genl_family *genl_family_find_byname(char *name)
 115{
 116	struct genl_family *f;
 117	int i;
 118
 119	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
 120		list_for_each_entry(f, genl_family_chain(i), family_list)
 121			if (strcmp(f->name, name) == 0)
 122				return f;
 123
 124	return NULL;
 125}
 126
 127static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
 128{
 129	int i;
 130
 131	for (i = 0; i < family->n_ops; i++)
 132		if (family->ops[i].cmd == cmd)
 133			return &family->ops[i];
 134
 135	return NULL;
 136}
 137
 138/* Of course we are going to have problems once we hit
 139 * 2^16 alive types, but that can only happen by the year 2K.
 140 */
 141static u16 genl_generate_id(void)
 142{
 143	static u16 id_gen_idx = GENL_MIN_ID;
 144	int i;
 145
 146	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
 147		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
 148		    id_gen_idx != GENL_ID_PMCRAID &&
 149		    !genl_family_find_byid(id_gen_idx))
 150			return id_gen_idx;
 151		if (++id_gen_idx > GENL_MAX_ID)
 152			id_gen_idx = GENL_MIN_ID;
 153	}
 154
 155	return 0;
 156}
 157
 158static int genl_allocate_reserve_groups(int n_groups, int *first_id)
 159{
 160	unsigned long *new_groups;
 161	int start = 0;
 162	int i;
 163	int id;
 164	bool fits;
 165
 166	do {
 167		if (start == 0)
 168			id = find_first_zero_bit(mc_groups,
 169						 mc_groups_longs *
 170						 BITS_PER_LONG);
 171		else
 172			id = find_next_zero_bit(mc_groups,
 173						mc_groups_longs * BITS_PER_LONG,
 174						start);
 175
 176		fits = true;
 177		for (i = id;
 178		     i < min_t(int, id + n_groups,
 179			       mc_groups_longs * BITS_PER_LONG);
 180		     i++) {
 181			if (test_bit(i, mc_groups)) {
 182				start = i;
 183				fits = false;
 184				break;
 185			}
 186		}
 187
 188		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
 189			unsigned long new_longs = mc_groups_longs +
 190						  BITS_TO_LONGS(n_groups);
 191			size_t nlen = new_longs * sizeof(unsigned long);
 192
 193			if (mc_groups == &mc_group_start) {
 194				new_groups = kzalloc(nlen, GFP_KERNEL);
 195				if (!new_groups)
 196					return -ENOMEM;
 197				mc_groups = new_groups;
 198				*mc_groups = mc_group_start;
 199			} else {
 200				new_groups = krealloc(mc_groups, nlen,
 201						      GFP_KERNEL);
 202				if (!new_groups)
 203					return -ENOMEM;
 204				mc_groups = new_groups;
 205				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
 206					mc_groups[mc_groups_longs + i] = 0;
 207			}
 208			mc_groups_longs = new_longs;
 209		}
 210	} while (!fits);
 211
 212	for (i = id; i < id + n_groups; i++)
 213		set_bit(i, mc_groups);
 214	*first_id = id;
 215	return 0;
 216}
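/*
 * Worked example (illustrative): on a 64-bit system with only the single
 * static word in use, a family asking for 50 groups first finds the gap
 * starting at bit 19 (bits 0, 1, 16, 17 and 18 are reserved), notices that
 * 19 + 50 exceeds the current 64 bits, grows the bitmap to
 * 1 + BITS_TO_LONGS(50) == 2 longs, and then reserves bits 19..68 with
 * *first_id == 19.
 */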
 217
 218static struct genl_family genl_ctrl;
 219
 220static int genl_validate_assign_mc_groups(struct genl_family *family)
 221{
 222	int first_id;
 223	int n_groups = family->n_mcgrps;
 224	int err = 0, i;
 225	bool groups_allocated = false;
 226
 227	if (!n_groups)
 228		return 0;
 229
 230	for (i = 0; i < n_groups; i++) {
 231		const struct genl_multicast_group *grp = &family->mcgrps[i];
 232
 233		if (WARN_ON(grp->name[0] == '\0'))
 234			return -EINVAL;
 235		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
 236			return -EINVAL;
 237	}
 238
 239	/* special-case our own group and hacks */
 240	if (family == &genl_ctrl) {
 241		first_id = GENL_ID_CTRL;
 242		BUG_ON(n_groups != 1);
 243	} else if (strcmp(family->name, "NET_DM") == 0) {
 244		first_id = 1;
 245		BUG_ON(n_groups != 1);
 246	} else if (family->id == GENL_ID_VFS_DQUOT) {
 247		first_id = GENL_ID_VFS_DQUOT;
 248		BUG_ON(n_groups != 1);
 249	} else if (family->id == GENL_ID_PMCRAID) {
 250		first_id = GENL_ID_PMCRAID;
 251		BUG_ON(n_groups != 1);
 252	} else {
 253		groups_allocated = true;
 254		err = genl_allocate_reserve_groups(n_groups, &first_id);
 255		if (err)
 256			return err;
 257	}
 258
 259	family->mcgrp_offset = first_id;
 260
 261	/* if still initializing, can't and don't need to realloc bitmaps */
 262	if (!init_net.genl_sock)
 263		return 0;
 264
 265	if (family->netnsok) {
 266		struct net *net;
 267
 268		netlink_table_grab();
 269		rcu_read_lock();
 270		for_each_net_rcu(net) {
 271			err = __netlink_change_ngroups(net->genl_sock,
 272					mc_groups_longs * BITS_PER_LONG);
 273			if (err) {
 274				/*
 275				 * No need to roll back, can only fail if
 276				 * memory allocation fails and then the
 277				 * number of _possible_ groups has been
 278				 * increased on some sockets which is ok.
 279				 */
 280				break;
 281			}
 282		}
 283		rcu_read_unlock();
 284		netlink_table_ungrab();
 285	} else {
 286		err = netlink_change_ngroups(init_net.genl_sock,
 287					     mc_groups_longs * BITS_PER_LONG);
 288	}
 289
 290	if (groups_allocated && err) {
 291		for (i = 0; i < family->n_mcgrps; i++)
 292			clear_bit(family->mcgrp_offset + i, mc_groups);
 293	}
 294
 295	return err;
 296}
 297
 298static void genl_unregister_mc_groups(struct genl_family *family)
 299{
 300	struct net *net;
 301	int i;
 302
 303	netlink_table_grab();
 304	rcu_read_lock();
 305	for_each_net_rcu(net) {
 306		for (i = 0; i < family->n_mcgrps; i++)
 307			__netlink_clear_multicast_users(
 308				net->genl_sock, family->mcgrp_offset + i);
 309	}
 310	rcu_read_unlock();
 311	netlink_table_ungrab();
 312
 313	for (i = 0; i < family->n_mcgrps; i++) {
 314		int grp_id = family->mcgrp_offset + i;
 315
 316		if (grp_id != 1)
 317			clear_bit(grp_id, mc_groups);
 318		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
 319				&family->mcgrps[i], grp_id);
 320	}
 321}
 322
 323static int genl_validate_ops(const struct genl_family *family)
 324{
 325	const struct genl_ops *ops = family->ops;
 326	unsigned int n_ops = family->n_ops;
 327	int i, j;
 328
 329	if (WARN_ON(n_ops && !ops))
 330		return -EINVAL;
 331
 332	if (!n_ops)
 333		return 0;
 334
 335	for (i = 0; i < n_ops; i++) {
 336		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
 337			return -EINVAL;
 338		for (j = i + 1; j < n_ops; j++)
 339			if (ops[i].cmd == ops[j].cmd)
 340				return -EINVAL;
 341	}
 342
 343	return 0;
 344}
 345
 346/**
 347 * __genl_register_family - register a generic netlink family
 348 * @family: generic netlink family
 349 *
 350 * Registers the specified family after validating it first. Only one
 351 * family may be registered with the same family name or identifier.
 352 * The family id may equal GENL_ID_GENERATE, causing a unique id to
 353 * be generated and assigned automatically.
 354 *
 355 * The family's ops array must already be assigned; you can use the
 356 * genl_register_family_with_ops() helper function for that.
 357 *
 358 * Return 0 on success or a negative error code.
 359 */
 360int __genl_register_family(struct genl_family *family)
 361{
 362	int err = -EINVAL, i;
 363
 364	if (family->id && family->id < GENL_MIN_ID)
 365		goto errout;
 366
 367	if (family->id > GENL_MAX_ID)
 368		goto errout;
 369
 370	err = genl_validate_ops(family);
 371	if (err)
 372		return err;
 373
 374	genl_lock_all();
 375
 376	if (genl_family_find_byname(family->name)) {
 377		err = -EEXIST;
 378		goto errout_locked;
 379	}
 380
 381	if (family->id == GENL_ID_GENERATE) {
 382		u16 newid = genl_generate_id();
 383
 384		if (!newid) {
 385			err = -ENOMEM;
 386			goto errout_locked;
 387		}
 388
 389		family->id = newid;
 390	} else if (genl_family_find_byid(family->id)) {
 391		err = -EEXIST;
 392		goto errout_locked;
 393	}
 394
 395	if (family->maxattr && !family->parallel_ops) {
 396		family->attrbuf = kmalloc((family->maxattr+1) *
 397					sizeof(struct nlattr *), GFP_KERNEL);
 398		if (family->attrbuf == NULL) {
 399			err = -ENOMEM;
 400			goto errout_locked;
 401		}
 402	} else
 403		family->attrbuf = NULL;
 404
 405	err = genl_validate_assign_mc_groups(family);
 406	if (err)
 407		goto errout_locked;
 408
 409	list_add_tail(&family->family_list, genl_family_chain(family->id));
 410	genl_unlock_all();
 411
 412	/* send all events */
 413	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
 414	for (i = 0; i < family->n_mcgrps; i++)
 415		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
 416				&family->mcgrps[i], family->mcgrp_offset + i);
 417
 418	return 0;
 419
 420errout_locked:
 421	genl_unlock_all();
 422errout:
 423	return err;
 424}
 425EXPORT_SYMBOL(__genl_register_family);
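/*
 * Usage sketch (illustrative only): a minimal family registration built on
 * the helpers above.  The demo_* and DEMO_* names are hypothetical;
 * genl_register_family_with_ops_groups() and genl_unregister_family() are
 * the real entry points wrapping __genl_register_family().
 *
 *	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
 *		[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
 *	};
 *
 *	static struct genl_ops demo_ops[] = {
 *		{
 *			.cmd	= DEMO_CMD_GET,
 *			.doit	= demo_get_doit,
 *			.policy	= demo_policy,
 *		},
 *	};
 *
 *	static struct genl_multicast_group demo_mcgrps[] = {
 *		{ .name = "demo_events", },
 *	};
 *
 *	static struct genl_family demo_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.name		= "demo",
 *		.version	= 1,
 *		.maxattr	= DEMO_ATTR_MAX,
 *		.netnsok	= true,
 *	};
 *
 *	err = genl_register_family_with_ops_groups(&demo_family, demo_ops,
 *						   demo_mcgrps);
 *	...
 *	genl_unregister_family(&demo_family);
 */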
 426
 427/**
 428 * genl_unregister_family - unregister generic netlink family
 429 * @family: generic netlink family
 430 *
 431 * Unregisters the specified family.
 432 *
 433 * Returns 0 on success or a negative error code.
 434 */
 435int genl_unregister_family(struct genl_family *family)
 436{
 437	struct genl_family *rc;
 438
 439	genl_lock_all();
 440
 441	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
 442		if (family->id != rc->id || strcmp(rc->name, family->name))
 443			continue;
 444
 445		genl_unregister_mc_groups(family);
 446
 447		list_del(&rc->family_list);
 448		family->n_ops = 0;
 449		up_write(&cb_lock);
 450		wait_event(genl_sk_destructing_waitq,
 451			   atomic_read(&genl_sk_destructing_cnt) == 0);
 452		genl_unlock();
 453
 454		kfree(family->attrbuf);
 455		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 456		return 0;
 457	}
 458
 459	genl_unlock_all();
 460
 461	return -ENOENT;
 462}
 463EXPORT_SYMBOL(genl_unregister_family);
 464
 465/**
 466 * genlmsg_put - Add generic netlink header to netlink message
 467 * @skb: socket buffer holding the message
 468 * @portid: netlink portid the message is addressed to
 469 * @seq: sequence number (usually the one of the sender)
 470 * @family: generic netlink family
 471 * @flags: netlink message flags
 472 * @cmd: generic netlink command
 473 *
 474 * Returns a pointer to the user-specific header
 475 */
 476void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 477				struct genl_family *family, int flags, u8 cmd)
 478{
 479	struct nlmsghdr *nlh;
 480	struct genlmsghdr *hdr;
 481
 482	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
 483			family->hdrsize, flags);
 484	if (nlh == NULL)
 485		return NULL;
 486
 487	hdr = nlmsg_data(nlh);
 488	hdr->cmd = cmd;
 489	hdr->version = family->version;
 490	hdr->reserved = 0;
 491
 492	return (char *) hdr + GENL_HDRLEN;
 493}
 494EXPORT_SYMBOL(genlmsg_put);
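/*
 * Usage sketch (illustrative): building a reply around genlmsg_put() from a
 * .doit handler.  DEMO_CMD_GET, DEMO_ATTR_VALUE, demo_family and val are
 * hypothetical; the helpers are the regular netlink attribute API.
 *
 *	struct sk_buff *msg;
 *	void *hdr;
 *
 *	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &demo_family, 0, DEMO_CMD_GET);
 *	if (!hdr) {
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 *	}
 *	if (nla_put_u32(msg, DEMO_ATTR_VALUE, val)) {
 *		genlmsg_cancel(msg, hdr);
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 *	}
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 */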
 495
 496static int genl_lock_start(struct netlink_callback *cb)
 497{
 498	/* our ops are always const - netlink API doesn't propagate that */
 499	const struct genl_ops *ops = cb->data;
 500	int rc = 0;
 501
 502	if (ops->start) {
 503		genl_lock();
 504		rc = ops->start(cb);
 505		genl_unlock();
 506	}
 507	return rc;
 508}
 509
 510static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 511{
 512	/* our ops are always const - netlink API doesn't propagate that */
 513	const struct genl_ops *ops = cb->data;
 514	int rc;
 515
 516	genl_lock();
 517	rc = ops->dumpit(skb, cb);
 518	genl_unlock();
 519	return rc;
 520}
 521
 522static int genl_lock_done(struct netlink_callback *cb)
 523{
 524	/* our ops are always const - netlink API doesn't propagate that */
 525	const struct genl_ops *ops = cb->data;
 526	int rc = 0;
 527
 528	if (ops->done) {
 529		genl_lock();
 530		rc = ops->done(cb);
 531		genl_unlock();
 532	}
 533	return rc;
 534}
 535
 536static int genl_family_rcv_msg(struct genl_family *family,
 537			       struct sk_buff *skb,
 538			       struct nlmsghdr *nlh)
 539{
 540	const struct genl_ops *ops;
 541	struct net *net = sock_net(skb->sk);
 542	struct genl_info info;
 543	struct genlmsghdr *hdr = nlmsg_data(nlh);
 544	struct nlattr **attrbuf;
 545	int hdrlen, err;
 546
 547	/* this family doesn't exist in this netns */
 548	if (!family->netnsok && !net_eq(net, &init_net))
 549		return -ENOENT;
 550
 551	hdrlen = GENL_HDRLEN + family->hdrsize;
 552	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
 553		return -EINVAL;
 554
 555	ops = genl_get_cmd(hdr->cmd, family);
 556	if (ops == NULL)
 557		return -EOPNOTSUPP;
 558
 559	if ((ops->flags & GENL_ADMIN_PERM) &&
 560	    !netlink_capable(skb, CAP_NET_ADMIN))
 561		return -EPERM;
 562
 563	if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
 564	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
 565		return -EPERM;
 566
 567	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 568		int rc;
 569
 570		if (ops->dumpit == NULL)
 571			return -EOPNOTSUPP;
 572
 573		if (!family->parallel_ops) {
 574			struct netlink_dump_control c = {
 575				.module = family->module,
 576				/* we have const, but the netlink API doesn't */
 577				.data = (void *)ops,
 578				.start = genl_lock_start,
 579				.dump = genl_lock_dumpit,
 580				.done = genl_lock_done,
 581			};
 582
 583			genl_unlock();
 584			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
 585			genl_lock();
 586
 587		} else {
 588			struct netlink_dump_control c = {
 589				.module = family->module,
 590				.start = ops->start,
 591				.dump = ops->dumpit,
 592				.done = ops->done,
 593			};
 594
 595			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
 596		}
 597
 598		return rc;
 599	}
 600
 601	if (ops->doit == NULL)
 602		return -EOPNOTSUPP;
 603
 604	if (family->maxattr && family->parallel_ops) {
 605		attrbuf = kmalloc((family->maxattr+1) *
 606					sizeof(struct nlattr *), GFP_KERNEL);
 607		if (attrbuf == NULL)
 608			return -ENOMEM;
 609	} else
 610		attrbuf = family->attrbuf;
 611
 612	if (attrbuf) {
 613		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
 614				  ops->policy);
 615		if (err < 0)
 616			goto out;
 617	}
 618
 619	info.snd_seq = nlh->nlmsg_seq;
 620	info.snd_portid = NETLINK_CB(skb).portid;
 621	info.nlhdr = nlh;
 622	info.genlhdr = nlmsg_data(nlh);
 623	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
 624	info.attrs = attrbuf;
 625	genl_info_net_set(&info, net);
 626	memset(&info.user_ptr, 0, sizeof(info.user_ptr));
 627
 628	if (family->pre_doit) {
 629		err = family->pre_doit(ops, skb, &info);
 630		if (err)
 631			goto out;
 632	}
 633
 634	err = ops->doit(skb, &info);
 635
 636	if (family->post_doit)
 637		family->post_doit(ops, skb, &info);
 638
 639out:
 640	if (family->parallel_ops)
 641		kfree(attrbuf);
 642
 643	return err;
 644}
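/*
 * Illustrative sketch of the .doit contract set up above: by the time a
 * handler runs, info->attrs has been parsed against ops->policy (when the
 * family declares maxattr) and info identifies the sender and namespace.
 * The demo_* names are hypothetical.
 *
 *	static int demo_get_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		struct net *net = genl_info_net(info);
 *		u32 val;
 *
 *		if (!info->attrs[DEMO_ATTR_VALUE])
 *			return -EINVAL;
 *		val = nla_get_u32(info->attrs[DEMO_ATTR_VALUE]);
 *
 *		return demo_do_get(net, info->snd_portid, val);
 *	}
 */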
 645
 646static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 647{
 648	struct genl_family *family;
 649	int err;
 650
 651	family = genl_family_find_byid(nlh->nlmsg_type);
 652	if (family == NULL)
 653		return -ENOENT;
 654
 655	if (!family->parallel_ops)
 656		genl_lock();
 657
 658	err = genl_family_rcv_msg(family, skb, nlh);
 659
 660	if (!family->parallel_ops)
 661		genl_unlock();
 662
 663	return err;
 664}
 665
 666static void genl_rcv(struct sk_buff *skb)
 667{
 668	down_read(&cb_lock);
 669	netlink_rcv_skb(skb, &genl_rcv_msg);
 670	up_read(&cb_lock);
 671}
 672
 673/**************************************************************************
 674 * Controller
 675 **************************************************************************/
 676
 677static struct genl_family genl_ctrl = {
 678	.id = GENL_ID_CTRL,
 679	.name = "nlctrl",
 680	.version = 0x2,
 681	.maxattr = CTRL_ATTR_MAX,
 682	.netnsok = true,
 683};
 684
 685static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
 686			  u32 flags, struct sk_buff *skb, u8 cmd)
 687{
 688	void *hdr;
 689
 690	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
 691	if (hdr == NULL)
 692		return -1;
 693
 694	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
 695	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
 696	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
 697	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
 698	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
 699		goto nla_put_failure;
 700
 701	if (family->n_ops) {
 702		struct nlattr *nla_ops;
 703		int i;
 704
 705		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
 706		if (nla_ops == NULL)
 707			goto nla_put_failure;
 708
 709		for (i = 0; i < family->n_ops; i++) {
 710			struct nlattr *nest;
 711			const struct genl_ops *ops = &family->ops[i];
 712			u32 op_flags = ops->flags;
 713
 714			if (ops->dumpit)
 715				op_flags |= GENL_CMD_CAP_DUMP;
 716			if (ops->doit)
 717				op_flags |= GENL_CMD_CAP_DO;
 718			if (ops->policy)
 719				op_flags |= GENL_CMD_CAP_HASPOL;
 720
 721			nest = nla_nest_start(skb, i + 1);
 722			if (nest == NULL)
 723				goto nla_put_failure;
 724
 725			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
 726			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
 727				goto nla_put_failure;
 728
 729			nla_nest_end(skb, nest);
 730		}
 731
 732		nla_nest_end(skb, nla_ops);
 733	}
 734
 735	if (family->n_mcgrps) {
 736		struct nlattr *nla_grps;
 737		int i;
 738
 739		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 740		if (nla_grps == NULL)
 741			goto nla_put_failure;
 742
 743		for (i = 0; i < family->n_mcgrps; i++) {
 744			struct nlattr *nest;
 745			const struct genl_multicast_group *grp;
 746
 747			grp = &family->mcgrps[i];
 748
 749			nest = nla_nest_start(skb, i + 1);
 750			if (nest == NULL)
 751				goto nla_put_failure;
 752
 753			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
 754					family->mcgrp_offset + i) ||
 755			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
 756					   grp->name))
 757				goto nla_put_failure;
 758
 759			nla_nest_end(skb, nest);
 760		}
 761		nla_nest_end(skb, nla_grps);
 762	}
 763
 764	genlmsg_end(skb, hdr);
 765	return 0;
 766
 767nla_put_failure:
 768	genlmsg_cancel(skb, hdr);
 769	return -EMSGSIZE;
 770}
 771
 772static int ctrl_fill_mcgrp_info(struct genl_family *family,
 773				const struct genl_multicast_group *grp,
 774				int grp_id, u32 portid, u32 seq, u32 flags,
 775				struct sk_buff *skb, u8 cmd)
 776{
 777	void *hdr;
 778	struct nlattr *nla_grps;
 779	struct nlattr *nest;
 780
 781	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
 782	if (hdr == NULL)
 783		return -1;
 784
 785	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
 786	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
 787		goto nla_put_failure;
 788
 789	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 790	if (nla_grps == NULL)
 791		goto nla_put_failure;
 792
 793	nest = nla_nest_start(skb, 1);
 794	if (nest == NULL)
 795		goto nla_put_failure;
 796
 797	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
 798	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
 799			   grp->name))
 800		goto nla_put_failure;
 801
 802	nla_nest_end(skb, nest);
 803	nla_nest_end(skb, nla_grps);
 804
 805	genlmsg_end(skb, hdr);
 806	return 0;
 807
 808nla_put_failure:
 809	genlmsg_cancel(skb, hdr);
 810	return -EMSGSIZE;
 811}
 812
 813static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 814{
 815
 816	int i, n = 0;
 817	struct genl_family *rt;
 818	struct net *net = sock_net(skb->sk);
 819	int chains_to_skip = cb->args[0];
 820	int fams_to_skip = cb->args[1];
 821
 822	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
 823		n = 0;
 824		list_for_each_entry(rt, genl_family_chain(i), family_list) {
 825			if (!rt->netnsok && !net_eq(net, &init_net))
 826				continue;
 827			if (++n < fams_to_skip)
 828				continue;
 829			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
 830					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
 831					   skb, CTRL_CMD_NEWFAMILY) < 0)
 832				goto errout;
 833		}
 834
 835		fams_to_skip = 0;
 836	}
 837
 838errout:
 839	cb->args[0] = i;
 840	cb->args[1] = n;
 841
 842	return skb->len;
 843}
 844
 845static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 846					     u32 portid, int seq, u8 cmd)
 847{
 848	struct sk_buff *skb;
 849	int err;
 850
 851	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 852	if (skb == NULL)
 853		return ERR_PTR(-ENOBUFS);
 854
 855	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
 856	if (err < 0) {
 857		nlmsg_free(skb);
 858		return ERR_PTR(err);
 859	}
 860
 861	return skb;
 862}
 863
 864static struct sk_buff *
 865ctrl_build_mcgrp_msg(struct genl_family *family,
 866		     const struct genl_multicast_group *grp,
 867		     int grp_id, u32 portid, int seq, u8 cmd)
 868{
 869	struct sk_buff *skb;
 870	int err;
 871
 872	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 873	if (skb == NULL)
 874		return ERR_PTR(-ENOBUFS);
 875
 876	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
 877				   seq, 0, skb, cmd);
 878	if (err < 0) {
 879		nlmsg_free(skb);
 880		return ERR_PTR(err);
 881	}
 882
 883	return skb;
 884}
 885
 886static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
 887	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
 888	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
 889				    .len = GENL_NAMSIZ - 1 },
 890};
 891
 892static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 893{
 894	struct sk_buff *msg;
 895	struct genl_family *res = NULL;
 896	int err = -EINVAL;
 897
 898	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
 899		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
 900		res = genl_family_find_byid(id);
 901		err = -ENOENT;
 902	}
 903
 904	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
 905		char *name;
 906
 907		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
 908		res = genl_family_find_byname(name);
 909#ifdef CONFIG_MODULES
 910		if (res == NULL) {
 911			genl_unlock();
 912			up_read(&cb_lock);
 913			request_module("net-pf-%d-proto-%d-family-%s",
 914				       PF_NETLINK, NETLINK_GENERIC, name);
 915			down_read(&cb_lock);
 916			genl_lock();
 917			res = genl_family_find_byname(name);
 918		}
 919#endif
 920		err = -ENOENT;
 921	}
 922
 923	if (res == NULL)
 924		return err;
 925
 926	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
 927		/* family doesn't exist here */
 928		return -ENOENT;
 929	}
 930
 931	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
 932				    CTRL_CMD_NEWFAMILY);
 933	if (IS_ERR(msg))
 934		return PTR_ERR(msg);
 935
 936	return genlmsg_reply(msg, info);
 937}
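/*
 * Note on the request_module() call above: it resolves the family name via
 * a "net-pf-%d-proto-%d-family-%s" module alias built from PF_NETLINK,
 * NETLINK_GENERIC and the requested name, so a family implemented as a
 * module is expected to declare such an alias, e.g. (with "demo" as a
 * hypothetical family name):
 *
 *	MODULE_ALIAS_GENL_FAMILY("demo");
 *
 * The macro comes from <net/genetlink.h> and emits exactly this alias.
 */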
 938
 939static int genl_ctrl_event(int event, struct genl_family *family,
 940			   const struct genl_multicast_group *grp,
 941			   int grp_id)
 942{
 943	struct sk_buff *msg;
 944
 945	/* genl is still initialising */
 946	if (!init_net.genl_sock)
 947		return 0;
 948
 949	switch (event) {
 950	case CTRL_CMD_NEWFAMILY:
 951	case CTRL_CMD_DELFAMILY:
 952		WARN_ON(grp);
 953		msg = ctrl_build_family_msg(family, 0, 0, event);
 954		break;
 955	case CTRL_CMD_NEWMCAST_GRP:
 956	case CTRL_CMD_DELMCAST_GRP:
 957		BUG_ON(!grp);
 958		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
 959		break;
 960	default:
 961		return -EINVAL;
 962	}
 963
 964	if (IS_ERR(msg))
 965		return PTR_ERR(msg);
 966
 967	if (!family->netnsok) {
 968		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
 969					0, GFP_KERNEL);
 970	} else {
 971		rcu_read_lock();
 972		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
 973					0, GFP_ATOMIC);
 974		rcu_read_unlock();
 975	}
 976
 977	return 0;
 978}
 979
 980static struct genl_ops genl_ctrl_ops[] = {
 981	{
 982		.cmd		= CTRL_CMD_GETFAMILY,
 983		.doit		= ctrl_getfamily,
 984		.dumpit		= ctrl_dumpfamily,
 985		.policy		= ctrl_policy,
 986	},
 987};
 988
 989static struct genl_multicast_group genl_ctrl_groups[] = {
 990	{ .name = "notify", },
 991};
 992
 993static int genl_bind(struct net *net, int group)
 994{
 995	int i, err = -ENOENT;
 996
 997	down_read(&cb_lock);
 998	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
 999		struct genl_family *f;
1000
1001		list_for_each_entry(f, genl_family_chain(i), family_list) {
1002			if (group >= f->mcgrp_offset &&
1003			    group < f->mcgrp_offset + f->n_mcgrps) {
1004				int fam_grp = group - f->mcgrp_offset;
1005
1006				if (!f->netnsok && net != &init_net)
1007					err = -ENOENT;
1008				else if (f->mcast_bind)
1009					err = f->mcast_bind(net, fam_grp);
1010				else
1011					err = 0;
1012				break;
1013			}
1014		}
1015	}
1016	up_read(&cb_lock);
1017
1018	return err;
1019}
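/*
 * Worked example (illustrative): a family that registered three groups and
 * was assigned mcgrp_offset == 20 owns global group ids 20..22.  A bind on
 * group 21 reaches it here with fam_grp == 1, the index into its mcgrps[]
 * array that a mcast_bind() callback sees.
 */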
1020
1021static void genl_unbind(struct net *net, int group)
1022{
1023	int i;
1024
1025	down_read(&cb_lock);
1026	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
1027		struct genl_family *f;
1028
1029		list_for_each_entry(f, genl_family_chain(i), family_list) {
1030			if (group >= f->mcgrp_offset &&
1031			    group < f->mcgrp_offset + f->n_mcgrps) {
1032				int fam_grp = group - f->mcgrp_offset;
1033
1034				if (f->mcast_unbind)
1035					f->mcast_unbind(net, fam_grp);
1036				break;
1037			}
1038		}
1039	}
1040	up_read(&cb_lock);
1041}
1042
1043static int __net_init genl_pernet_init(struct net *net)
1044{
1045	struct netlink_kernel_cfg cfg = {
1046		.input		= genl_rcv,
1047		.flags		= NL_CFG_F_NONROOT_RECV,
1048		.bind		= genl_bind,
1049		.unbind		= genl_unbind,
1050	};
1051
1052	/* we'll bump the group number right afterwards */
1053	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1054
1055	if (!net->genl_sock && net_eq(net, &init_net))
1056		panic("GENL: Cannot initialize generic netlink\n");
1057
1058	if (!net->genl_sock)
1059		return -ENOMEM;
1060
1061	return 0;
1062}
1063
1064static void __net_exit genl_pernet_exit(struct net *net)
1065{
1066	netlink_kernel_release(net->genl_sock);
1067	net->genl_sock = NULL;
1068}
1069
1070static struct pernet_operations genl_pernet_ops = {
1071	.init = genl_pernet_init,
1072	.exit = genl_pernet_exit,
1073};
1074
1075static int __init genl_init(void)
1076{
1077	int i, err;
1078
1079	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1080		INIT_LIST_HEAD(&family_ht[i]);
1081
1082	err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1083						   genl_ctrl_groups);
1084	if (err < 0)
1085		goto problem;
1086
1087	err = register_pernet_subsys(&genl_pernet_ops);
1088	if (err)
1089		goto problem;
1090
1091	return 0;
1092
1093problem:
1094	panic("GENL: Cannot register controller: %d\n", err);
1095}
1096
1097subsys_initcall(genl_init);
1098
1099static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1100			 gfp_t flags)
1101{
1102	struct sk_buff *tmp;
1103	struct net *net, *prev = NULL;
1104	int err;
1105
1106	for_each_net_rcu(net) {
1107		if (prev) {
1108			tmp = skb_clone(skb, flags);
1109			if (!tmp) {
1110				err = -ENOMEM;
1111				goto error;
1112			}
1113			err = nlmsg_multicast(prev->genl_sock, tmp,
1114					      portid, group, flags);
1115			if (err)
1116				goto error;
1117		}
1118
1119		prev = net;
1120	}
1121
1122	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1123 error:
1124	kfree_skb(skb);
1125	return err;
1126}
1127
1128int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1129			    u32 portid, unsigned int group, gfp_t flags)
1130{
1131	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1132		return -EINVAL;
1133	group = family->mcgrp_offset + group;
1134	return genlmsg_mcast(skb, portid, group, flags);
1135}
1136EXPORT_SYMBOL(genlmsg_multicast_allns);
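/*
 * Usage sketch (illustrative): mirroring genl_ctrl_event() above, a caller
 * multicasts to a family-relative group index in every network namespace
 * from inside an RCU read-side section (demo_family is hypothetical):
 *
 *	rcu_read_lock();
 *	genlmsg_multicast_allns(&demo_family, msg, 0, 0, GFP_ATOMIC);
 *	rcu_read_unlock();
 *
 * For a single namespace, genlmsg_multicast_netns() (as used by
 * genl_ctrl_event()) delivers to just that netns.
 */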
1137
1138void genl_notify(struct genl_family *family, struct sk_buff *skb,
1139		 struct genl_info *info, u32 group, gfp_t flags)
1140{
1141	struct net *net = genl_info_net(info);
1142	struct sock *sk = net->genl_sock;
1143	int report = 0;
1144
1145	if (info->nlhdr)
1146		report = nlmsg_report(info->nlhdr);
1147
1148	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1149		return;
1150	group = family->mcgrp_offset + group;
1151	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1152}
1153EXPORT_SYMBOL(genl_notify);