1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NETLINK Generic Netlink Family
4 *
5 * Authors: Jamal Hadi Salim
6 * Thomas Graf <tgraf@suug.ch>
7 * Johannes Berg <johannes@sipsolutions.net>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/errno.h>
14#include <linux/types.h>
15#include <linux/socket.h>
16#include <linux/string_helpers.h>
17#include <linux/skbuff.h>
18#include <linux/mutex.h>
19#include <linux/bitmap.h>
20#include <linux/rwsem.h>
21#include <linux/idr.h>
22#include <net/sock.h>
23#include <net/genetlink.h>
24
25#include "genetlink.h"
26
27static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
28static DECLARE_RWSEM(cb_lock);
29
30atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
31DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
32
33void genl_lock(void)
34{
35 mutex_lock(&genl_mutex);
36}
37EXPORT_SYMBOL(genl_lock);
38
39void genl_unlock(void)
40{
41 mutex_unlock(&genl_mutex);
42}
43EXPORT_SYMBOL(genl_unlock);
44
45static void genl_lock_all(void)
46{
47 down_write(&cb_lock);
48 genl_lock();
49}
50
51static void genl_unlock_all(void)
52{
53 genl_unlock();
54 up_write(&cb_lock);
55}
56
57static void genl_op_lock(const struct genl_family *family)
58{
59 if (!family->parallel_ops)
60 genl_lock();
61}
62
63static void genl_op_unlock(const struct genl_family *family)
64{
65 if (!family->parallel_ops)
66 genl_unlock();
67}
68
69static DEFINE_IDR(genl_fam_idr);
70
71/*
72 * Bitmap of multicast groups that are currently in use.
73 *
74 * To avoid an allocation at boot of just one unsigned long,
75 * declare it global instead.
76 * Bit 0 is marked as already used since group 0 is invalid.
77 * Bit 1 is marked as already used since the drop-monitor code
78 * abuses the API and thinks it can statically use group 1.
79 * That group will typically conflict with other groups that
80 * any proper users use.
81 * Bit 16 is marked as used since it's used for generic netlink
82 * and the code no longer marks pre-reserved IDs as used.
83 * Bit 17 is marked as already used since the VFS quota code
84 * also abused this API and relied on family == group ID, we
85 * cater to that by giving it a static family and group ID.
86 * Bit 18 is marked as already used since the PMCRAID driver
87 * did the same thing as the VFS quota code (maybe copied?)
88 */
89static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
90 BIT(GENL_ID_VFS_DQUOT) |
91 BIT(GENL_ID_PMCRAID);
92static unsigned long *mc_groups = &mc_group_start;
93static unsigned long mc_groups_longs = 1;
94
95/* We need the last attribute with non-zero ID therefore a 2-entry array */
96static struct nla_policy genl_policy_reject_all[] = {
97 { .type = NLA_REJECT },
98 { .type = NLA_REJECT },
99};
100
101static int genl_ctrl_event(int event, const struct genl_family *family,
102 const struct genl_multicast_group *grp,
103 int grp_id);
104
105static void
106genl_op_fill_in_reject_policy(const struct genl_family *family,
107 struct genl_ops *op)
108{
109 BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
110
111 if (op->policy || op->cmd < family->resv_start_op)
112 return;
113
114 op->policy = genl_policy_reject_all;
115 op->maxattr = 1;
116}
117
118static void
119genl_op_fill_in_reject_policy_split(const struct genl_family *family,
120 struct genl_split_ops *op)
121{
122 if (op->policy)
123 return;
124
125 op->policy = genl_policy_reject_all;
126 op->maxattr = 1;
127}
128
129static const struct genl_family *genl_family_find_byid(unsigned int id)
130{
131 return idr_find(&genl_fam_idr, id);
132}
133
134static const struct genl_family *genl_family_find_byname(char *name)
135{
136 const struct genl_family *family;
137 unsigned int id;
138
139 idr_for_each_entry(&genl_fam_idr, family, id)
140 if (strcmp(family->name, name) == 0)
141 return family;
142
143 return NULL;
144}
145
146struct genl_op_iter {
147 const struct genl_family *family;
148 struct genl_split_ops doit;
149 struct genl_split_ops dumpit;
150 int cmd_idx;
151 int entry_idx;
152 u32 cmd;
153 u8 flags;
154};
155
156static void genl_op_from_full(const struct genl_family *family,
157 unsigned int i, struct genl_ops *op)
158{
159 *op = family->ops[i];
160
161 if (!op->maxattr)
162 op->maxattr = family->maxattr;
163 if (!op->policy)
164 op->policy = family->policy;
165
166 genl_op_fill_in_reject_policy(family, op);
167}
168
169static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
170 struct genl_ops *op)
171{
172 int i;
173
174 for (i = 0; i < family->n_ops; i++)
175 if (family->ops[i].cmd == cmd) {
176 genl_op_from_full(family, i, op);
177 return 0;
178 }
179
180 return -ENOENT;
181}
182
183static void genl_op_from_small(const struct genl_family *family,
184 unsigned int i, struct genl_ops *op)
185{
186 memset(op, 0, sizeof(*op));
187 op->doit = family->small_ops[i].doit;
188 op->dumpit = family->small_ops[i].dumpit;
189 op->cmd = family->small_ops[i].cmd;
190 op->internal_flags = family->small_ops[i].internal_flags;
191 op->flags = family->small_ops[i].flags;
192 op->validate = family->small_ops[i].validate;
193
194 op->maxattr = family->maxattr;
195 op->policy = family->policy;
196
197 genl_op_fill_in_reject_policy(family, op);
198}
199
200static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
201 struct genl_ops *op)
202{
203 int i;
204
205 for (i = 0; i < family->n_small_ops; i++)
206 if (family->small_ops[i].cmd == cmd) {
207 genl_op_from_small(family, i, op);
208 return 0;
209 }
210
211 return -ENOENT;
212}
213
214static void genl_op_from_split(struct genl_op_iter *iter)
215{
216 const struct genl_family *family = iter->family;
217 int i, cnt = 0;
218
219 i = iter->entry_idx - family->n_ops - family->n_small_ops;
220
221 if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
222 iter->doit = family->split_ops[i + cnt];
223 genl_op_fill_in_reject_policy_split(family, &iter->doit);
224 cnt++;
225 } else {
226 memset(&iter->doit, 0, sizeof(iter->doit));
227 }
228
229 if (i + cnt < family->n_split_ops &&
230 family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
231 (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
232 iter->dumpit = family->split_ops[i + cnt];
233 genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
234 cnt++;
235 } else {
236 memset(&iter->dumpit, 0, sizeof(iter->dumpit));
237 }
238
239 WARN_ON(!cnt);
240 iter->entry_idx += cnt;
241}
242
243static int
244genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
245 struct genl_split_ops *op)
246{
247 int i;
248
249 for (i = 0; i < family->n_split_ops; i++)
250 if (family->split_ops[i].cmd == cmd &&
251 family->split_ops[i].flags & flag) {
252 *op = family->split_ops[i];
253 return 0;
254 }
255
256 return -ENOENT;
257}
258
259static int
260genl_cmd_full_to_split(struct genl_split_ops *op,
261 const struct genl_family *family,
262 const struct genl_ops *full, u8 flags)
263{
264 if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
265 (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
266 memset(op, 0, sizeof(*op));
267 return -ENOENT;
268 }
269
270 if (flags & GENL_CMD_CAP_DUMP) {
271 op->start = full->start;
272 op->dumpit = full->dumpit;
273 op->done = full->done;
274 } else {
275 op->pre_doit = family->pre_doit;
276 op->doit = full->doit;
277 op->post_doit = family->post_doit;
278 }
279
280 if (flags & GENL_CMD_CAP_DUMP &&
281 full->validate & GENL_DONT_VALIDATE_DUMP) {
282 op->policy = NULL;
283 op->maxattr = 0;
284 } else {
285 op->policy = full->policy;
286 op->maxattr = full->maxattr;
287 }
288
289 op->cmd = full->cmd;
290 op->internal_flags = full->internal_flags;
291 op->flags = full->flags;
292 op->validate = full->validate;
293
294 /* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
295 op->flags |= flags;
296
297 return 0;
298}
299
300/* Must make sure that op is initialized to 0 on failure */
301static int
302genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
303 struct genl_split_ops *op)
304{
305 struct genl_ops full;
306 int err;
307
308 err = genl_get_cmd_full(cmd, family, &full);
309 if (err == -ENOENT)
310 err = genl_get_cmd_small(cmd, family, &full);
311 /* Found one of legacy forms */
312 if (err == 0)
313 return genl_cmd_full_to_split(op, family, &full, flags);
314
315 err = genl_get_cmd_split(cmd, flags, family, op);
316 if (err)
317 memset(op, 0, sizeof(*op));
318 return err;
319}
320
321/* For policy dumping only, get ops of both do and dump.
322 * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
323 */
324static int
325genl_get_cmd_both(u32 cmd, const struct genl_family *family,
326 struct genl_split_ops *doit, struct genl_split_ops *dumpit)
327{
328 int err1, err2;
329
330 err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
331 err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
332
333 return err1 && err2 ? -ENOENT : 0;
334}
335
336static bool
337genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
338{
339 iter->family = family;
340 iter->cmd_idx = 0;
341 iter->entry_idx = 0;
342
343 iter->flags = 0;
344
345 return iter->family->n_ops +
346 iter->family->n_small_ops +
347 iter->family->n_split_ops;
348}
349
350static bool genl_op_iter_next(struct genl_op_iter *iter)
351{
352 const struct genl_family *family = iter->family;
353 bool legacy_op = true;
354 struct genl_ops op;
355
356 if (iter->entry_idx < family->n_ops) {
357 genl_op_from_full(family, iter->entry_idx, &op);
358 } else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
359 genl_op_from_small(family, iter->entry_idx - family->n_ops,
360 &op);
361 } else if (iter->entry_idx <
362 family->n_ops + family->n_small_ops + family->n_split_ops) {
363 legacy_op = false;
364 /* updates entry_idx */
365 genl_op_from_split(iter);
366 } else {
367 return false;
368 }
369
370 iter->cmd_idx++;
371
372 if (legacy_op) {
373 iter->entry_idx++;
374
375 genl_cmd_full_to_split(&iter->doit, family,
376 &op, GENL_CMD_CAP_DO);
377 genl_cmd_full_to_split(&iter->dumpit, family,
378 &op, GENL_CMD_CAP_DUMP);
379 }
380
381 iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
382 iter->flags = iter->doit.flags | iter->dumpit.flags;
383
384 return true;
385}
386
387static void
388genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
389{
390 *dst = *src;
391}
392
393static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
394{
395 return iter->cmd_idx;
396}
397
398static int genl_allocate_reserve_groups(int n_groups, int *first_id)
399{
400 unsigned long *new_groups;
401 int start = 0;
402 int i;
403 int id;
404 bool fits;
405
406 do {
407 if (start == 0)
408 id = find_first_zero_bit(mc_groups,
409 mc_groups_longs *
410 BITS_PER_LONG);
411 else
412 id = find_next_zero_bit(mc_groups,
413 mc_groups_longs * BITS_PER_LONG,
414 start);
415
416 fits = true;
417 for (i = id;
418 i < min_t(int, id + n_groups,
419 mc_groups_longs * BITS_PER_LONG);
420 i++) {
421 if (test_bit(i, mc_groups)) {
422 start = i;
423 fits = false;
424 break;
425 }
426 }
427
428 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
429 unsigned long new_longs = mc_groups_longs +
430 BITS_TO_LONGS(n_groups);
431 size_t nlen = new_longs * sizeof(unsigned long);
432
433 if (mc_groups == &mc_group_start) {
434 new_groups = kzalloc(nlen, GFP_KERNEL);
435 if (!new_groups)
436 return -ENOMEM;
437 mc_groups = new_groups;
438 *mc_groups = mc_group_start;
439 } else {
440 new_groups = krealloc(mc_groups, nlen,
441 GFP_KERNEL);
442 if (!new_groups)
443 return -ENOMEM;
444 mc_groups = new_groups;
445 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
446 mc_groups[mc_groups_longs + i] = 0;
447 }
448 mc_groups_longs = new_longs;
449 }
450 } while (!fits);
451
452 for (i = id; i < id + n_groups; i++)
453 set_bit(i, mc_groups);
454 *first_id = id;
455 return 0;
456}
457
458static struct genl_family genl_ctrl;
459
460static int genl_validate_assign_mc_groups(struct genl_family *family)
461{
462 int first_id;
463 int n_groups = family->n_mcgrps;
464 int err = 0, i;
465 bool groups_allocated = false;
466
467 if (!n_groups)
468 return 0;
469
470 for (i = 0; i < n_groups; i++) {
471 const struct genl_multicast_group *grp = &family->mcgrps[i];
472
473 if (WARN_ON(grp->name[0] == '\0'))
474 return -EINVAL;
475 if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
476 return -EINVAL;
477 }
478
479 /* special-case our own group and hacks */
480 if (family == &genl_ctrl) {
481 first_id = GENL_ID_CTRL;
482 BUG_ON(n_groups != 1);
483 } else if (strcmp(family->name, "NET_DM") == 0) {
484 first_id = 1;
485 BUG_ON(n_groups != 1);
486 } else if (family->id == GENL_ID_VFS_DQUOT) {
487 first_id = GENL_ID_VFS_DQUOT;
488 BUG_ON(n_groups != 1);
489 } else if (family->id == GENL_ID_PMCRAID) {
490 first_id = GENL_ID_PMCRAID;
491 BUG_ON(n_groups != 1);
492 } else {
493 groups_allocated = true;
494 err = genl_allocate_reserve_groups(n_groups, &first_id);
495 if (err)
496 return err;
497 }
498
499 family->mcgrp_offset = first_id;
500
501 /* if still initializing, can't and don't need to realloc bitmaps */
502 if (!init_net.genl_sock)
503 return 0;
504
505 if (family->netnsok) {
506 struct net *net;
507
508 netlink_table_grab();
509 rcu_read_lock();
510 for_each_net_rcu(net) {
511 err = __netlink_change_ngroups(net->genl_sock,
512 mc_groups_longs * BITS_PER_LONG);
513 if (err) {
514 /*
515 * No need to roll back, can only fail if
516 * memory allocation fails and then the
517 * number of _possible_ groups has been
518 * increased on some sockets which is ok.
519 */
520 break;
521 }
522 }
523 rcu_read_unlock();
524 netlink_table_ungrab();
525 } else {
526 err = netlink_change_ngroups(init_net.genl_sock,
527 mc_groups_longs * BITS_PER_LONG);
528 }
529
530 if (groups_allocated && err) {
531 for (i = 0; i < family->n_mcgrps; i++)
532 clear_bit(family->mcgrp_offset + i, mc_groups);
533 }
534
535 return err;
536}
537
538static void genl_unregister_mc_groups(const struct genl_family *family)
539{
540 struct net *net;
541 int i;
542
543 netlink_table_grab();
544 rcu_read_lock();
545 for_each_net_rcu(net) {
546 for (i = 0; i < family->n_mcgrps; i++)
547 __netlink_clear_multicast_users(
548 net->genl_sock, family->mcgrp_offset + i);
549 }
550 rcu_read_unlock();
551 netlink_table_ungrab();
552
553 for (i = 0; i < family->n_mcgrps; i++) {
554 int grp_id = family->mcgrp_offset + i;
555
556 if (grp_id != 1)
557 clear_bit(grp_id, mc_groups);
558 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
559 &family->mcgrps[i], grp_id);
560 }
561}
562
563static bool genl_split_op_check(const struct genl_split_ops *op)
564{
565 if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
566 GENL_CMD_CAP_DUMP)) != 1))
567 return true;
568 return false;
569}
570
571static int genl_validate_ops(const struct genl_family *family)
572{
573 struct genl_op_iter i, j;
574 unsigned int s;
575
576 if (WARN_ON(family->n_ops && !family->ops) ||
577 WARN_ON(family->n_small_ops && !family->small_ops) ||
578 WARN_ON(family->n_split_ops && !family->split_ops))
579 return -EINVAL;
580
581 for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
582 if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
583 return -EINVAL;
584
585 if (WARN_ON(i.cmd >= family->resv_start_op &&
586 (i.doit.validate || i.dumpit.validate)))
587 return -EINVAL;
588
589 genl_op_iter_copy(&j, &i);
590 while (genl_op_iter_next(&j)) {
591 if (i.cmd == j.cmd)
592 return -EINVAL;
593 }
594 }
595
596 if (family->n_split_ops) {
597 if (genl_split_op_check(&family->split_ops[0]))
598 return -EINVAL;
599 }
600
601 for (s = 1; s < family->n_split_ops; s++) {
602 const struct genl_split_ops *a, *b;
603
604 a = &family->split_ops[s - 1];
605 b = &family->split_ops[s];
606
607 if (genl_split_op_check(b))
608 return -EINVAL;
609
610 /* Check sort order */
611 if (a->cmd < b->cmd) {
612 continue;
613 } else if (a->cmd > b->cmd) {
614 WARN_ON(1);
615 return -EINVAL;
616 }
617
618 if (a->internal_flags != b->internal_flags ||
619 ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
620 GENL_CMD_CAP_DUMP))) {
621 WARN_ON(1);
622 return -EINVAL;
623 }
624
625 if ((a->flags & GENL_CMD_CAP_DO) &&
626 (b->flags & GENL_CMD_CAP_DUMP))
627 continue;
628
629 WARN_ON(1);
630 return -EINVAL;
631 }
632
633 return 0;
634}
635
636static void *genl_sk_priv_alloc(struct genl_family *family)
637{
638 void *priv;
639
640 priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
641 if (!priv)
642 return ERR_PTR(-ENOMEM);
643
644 if (family->sock_priv_init)
645 family->sock_priv_init(priv);
646
647 return priv;
648}
649
650static void genl_sk_priv_free(const struct genl_family *family, void *priv)
651{
652 if (family->sock_priv_destroy)
653 family->sock_priv_destroy(priv);
654 kfree(priv);
655}
656
657static int genl_sk_privs_alloc(struct genl_family *family)
658{
659 if (!family->sock_priv_size)
660 return 0;
661
662 family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
663 if (!family->sock_privs)
664 return -ENOMEM;
665 xa_init(family->sock_privs);
666 return 0;
667}
668
669static void genl_sk_privs_free(const struct genl_family *family)
670{
671 unsigned long id;
672 void *priv;
673
674 if (!family->sock_priv_size)
675 return;
676
677 xa_for_each(family->sock_privs, id, priv)
678 genl_sk_priv_free(family, priv);
679
680 xa_destroy(family->sock_privs);
681 kfree(family->sock_privs);
682}
683
684static void genl_sk_priv_free_by_sock(struct genl_family *family,
685 struct sock *sk)
686{
687 void *priv;
688
689 if (!family->sock_priv_size)
690 return;
691 priv = xa_erase(family->sock_privs, (unsigned long) sk);
692 if (!priv)
693 return;
694 genl_sk_priv_free(family, priv);
695}
696
697static void genl_release(struct sock *sk, unsigned long *groups)
698{
699 struct genl_family *family;
700 unsigned int id;
701
702 down_read(&cb_lock);
703
704 idr_for_each_entry(&genl_fam_idr, family, id)
705 genl_sk_priv_free_by_sock(family, sk);
706
707 up_read(&cb_lock);
708}
709
710/**
711 * __genl_sk_priv_get - Get family private pointer for socket, if exists
712 *
713 * @family: family
714 * @sk: socket
715 *
716 * Lookup a private memory for a Generic netlink family and specified socket.
717 *
718 * Caller should make sure this is called in RCU read locked section.
719 *
720 * Return: valid pointer on success, otherwise negative error value
721 * encoded by ERR_PTR(), NULL in case priv does not exist.
722 */
723void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
724{
725 if (WARN_ON_ONCE(!family->sock_privs))
726 return ERR_PTR(-EINVAL);
727 return xa_load(family->sock_privs, (unsigned long) sk);
728}
729
730/**
731 * genl_sk_priv_get - Get family private pointer for socket
732 *
733 * @family: family
734 * @sk: socket
735 *
736 * Look up the private memory attached to the specified socket for a generic netlink family.
737 * Allocate the private memory if it does not exist yet.
738 *
739 * Return: valid pointer on success, otherwise negative error value
740 * encoded by ERR_PTR().
741 */
742void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
743{
744 void *priv, *old_priv;
745
746 priv = __genl_sk_priv_get(family, sk);
747 if (priv)
748 return priv;
749
750 /* priv for the family does not exist so far, create it. */
751
752 priv = genl_sk_priv_alloc(family);
753 if (IS_ERR(priv))
754 return ERR_CAST(priv);
755
756 old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
757 priv, GFP_KERNEL);
758 if (old_priv) {
759 genl_sk_priv_free(family, priv);
760 if (xa_is_err(old_priv))
761 return ERR_PTR(xa_err(old_priv));
762 /* Race happened, priv for the socket was already inserted. */
763 return old_priv;
764 }
765 return priv;
766}
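
/*
 * Illustrative usage sketch (not part of the original file): how a family's
 * doit handler might use genl_sk_priv_get() to keep per-socket state,
 * assuming the family set .sock_priv_size (and optionally .sock_priv_init).
 * The "demo_*" names below are hypothetical.
 *
 *	struct demo_sk_priv {
 *		u32 request_count;
 *	};
 *
 *	static int demo_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		struct demo_sk_priv *priv;
 *
 *		priv = genl_sk_priv_get(&demo_family, NETLINK_CB(skb).sk);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		priv->request_count++;
 *		return 0;
 *	}
 *
 * The memory is released through sock_priv_destroy (if set) when the socket
 * is closed (genl_release()) or the family is unregistered
 * (genl_sk_privs_free()).
 */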
767
768/**
769 * genl_register_family - register a generic netlink family
770 * @family: generic netlink family
771 *
772 * Registers the specified family after validating it first. Only one
773 * family may be registered with the same family name or identifier.
774 *
775 * The family's ops, multicast groups and module pointer must already
776 * be assigned.
777 *
778 * Return 0 on success or a negative error code.
779 */
780int genl_register_family(struct genl_family *family)
781{
782 int err, i;
783 int start = GENL_START_ALLOC, end = GENL_MAX_ID;
784
785 err = genl_validate_ops(family);
786 if (err)
787 return err;
788
789 genl_lock_all();
790
791 if (genl_family_find_byname(family->name)) {
792 err = -EEXIST;
793 goto errout_locked;
794 }
795
796 err = genl_sk_privs_alloc(family);
797 if (err)
798 goto errout_locked;
799
800 /*
801 * Sadly, a few cases need to be special-cased
802 * due to them having previously abused the API
803 * and having used their family ID also as their
804 * multicast group ID, so we use reserved IDs
805 * for both to be sure we can do that mapping.
806 */
807 if (family == &genl_ctrl) {
808 /* and this needs to be special for initial family lookups */
809 start = end = GENL_ID_CTRL;
810 } else if (strcmp(family->name, "pmcraid") == 0) {
811 start = end = GENL_ID_PMCRAID;
812 } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
813 start = end = GENL_ID_VFS_DQUOT;
814 }
815
816 family->id = idr_alloc_cyclic(&genl_fam_idr, family,
817 start, end + 1, GFP_KERNEL);
818 if (family->id < 0) {
819 err = family->id;
820 goto errout_sk_privs_free;
821 }
822
823 err = genl_validate_assign_mc_groups(family);
824 if (err)
825 goto errout_remove;
826
827 genl_unlock_all();
828
829 /* send all events */
830 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
831 for (i = 0; i < family->n_mcgrps; i++)
832 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
833 &family->mcgrps[i], family->mcgrp_offset + i);
834
835 return 0;
836
837errout_remove:
838 idr_remove(&genl_fam_idr, family->id);
839errout_sk_privs_free:
840 genl_sk_privs_free(family);
841errout_locked:
842 genl_unlock_all();
843 return err;
844}
845EXPORT_SYMBOL(genl_register_family);
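
/*
 * Illustrative sketch (not part of the original file): a minimal family
 * definition and registration, roughly what a typical caller of
 * genl_register_family() looks like.  All "demo_*"/"DEMO_*" names are
 * hypothetical; a real family defines its commands and attributes in its
 * own uAPI header.
 *
 *	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
 *		[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
 *	};
 *
 *	static const struct genl_ops demo_ops[] = {
 *		{
 *			.cmd	= DEMO_CMD_GET,
 *			.doit	= demo_doit,
 *			.flags	= GENL_ADMIN_PERM,
 *		},
 *	};
 *
 *	static struct genl_family demo_family = {
 *		.name		= "demo",
 *		.version	= 1,
 *		.maxattr	= DEMO_ATTR_MAX,
 *		.policy		= demo_policy,
 *		.module		= THIS_MODULE,
 *		.ops		= demo_ops,
 *		.n_ops		= ARRAY_SIZE(demo_ops),
 *		.resv_start_op	= DEMO_CMD_GET + 1,
 *	};
 *
 *	err = genl_register_family(&demo_family);
 *	...
 *	genl_unregister_family(&demo_family);
 *
 * Registration validates the ops, allocates a family ID from the IDR and
 * announces the new family to userspace via CTRL_CMD_NEWFAMILY.
 */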
846
847/**
848 * genl_unregister_family - unregister generic netlink family
849 * @family: generic netlink family
850 *
851 * Unregisters the specified family.
852 *
853 * Returns 0 on success or a negative error code.
854 */
855int genl_unregister_family(const struct genl_family *family)
856{
857 genl_lock_all();
858
859 if (!genl_family_find_byid(family->id)) {
860 genl_unlock_all();
861 return -ENOENT;
862 }
863
864 genl_unregister_mc_groups(family);
865
866 idr_remove(&genl_fam_idr, family->id);
867
868 up_write(&cb_lock);
869 wait_event(genl_sk_destructing_waitq,
870 atomic_read(&genl_sk_destructing_cnt) == 0);
871
872 genl_sk_privs_free(family);
873
874 genl_unlock();
875
876 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
877
878 return 0;
879}
880EXPORT_SYMBOL(genl_unregister_family);
881
882/**
883 * genlmsg_put - Add generic netlink header to netlink message
884 * @skb: socket buffer holding the message
885 * @portid: netlink portid the message is addressed to
886 * @seq: sequence number (usually the one of the sender)
887 * @family: generic netlink family
888 * @flags: netlink message flags
889 * @cmd: generic netlink command
890 *
891 * Returns pointer to user specific header
892 */
893void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
894 const struct genl_family *family, int flags, u8 cmd)
895{
896 struct nlmsghdr *nlh;
897 struct genlmsghdr *hdr;
898
899 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
900 family->hdrsize, flags);
901 if (nlh == NULL)
902 return NULL;
903
904 hdr = nlmsg_data(nlh);
905 hdr->cmd = cmd;
906 hdr->version = family->version;
907 hdr->reserved = 0;
908
909 return (char *) hdr + GENL_HDRLEN;
910}
911EXPORT_SYMBOL(genlmsg_put);
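
/*
 * Illustrative sketch (not part of the original file): building a reply in a
 * doit handler with genlmsg_put()/genlmsg_end() and returning it with
 * genlmsg_reply().  The family, command and attribute names are hypothetical.
 *
 *	struct sk_buff *msg;
 *	void *hdr;
 *
 *	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &demo_family, 0, DEMO_CMD_GET);
 *	if (!hdr)
 *		goto err_free;
 *
 *	if (nla_put_u32(msg, DEMO_ATTR_VALUE, 42))
 *		goto err_cancel;
 *
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 *
 * err_cancel:
 *	genlmsg_cancel(msg, hdr);
 * err_free:
 *	nlmsg_free(msg);
 *	return -EMSGSIZE;
 */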
912
913static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
914{
915 return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
916}
917
918static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
919{
920 kfree(info);
921}
922
923static struct nlattr **
924genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
925 struct nlmsghdr *nlh,
926 struct netlink_ext_ack *extack,
927 const struct genl_split_ops *ops,
928 int hdrlen,
929 enum genl_validate_flags no_strict_flag)
930{
931 enum netlink_validation validate = ops->validate & no_strict_flag ?
932 NL_VALIDATE_LIBERAL :
933 NL_VALIDATE_STRICT;
934 struct nlattr **attrbuf;
935 int err;
936
937 if (!ops->maxattr)
938 return NULL;
939
940 attrbuf = kmalloc_array(ops->maxattr + 1,
941 sizeof(struct nlattr *), GFP_KERNEL);
942 if (!attrbuf)
943 return ERR_PTR(-ENOMEM);
944
945 err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
946 validate, extack);
947 if (err) {
948 kfree(attrbuf);
949 return ERR_PTR(err);
950 }
951 return attrbuf;
952}
953
954static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
955{
956 kfree(attrbuf);
957}
958
959struct genl_start_context {
960 const struct genl_family *family;
961 struct nlmsghdr *nlh;
962 struct netlink_ext_ack *extack;
963 const struct genl_split_ops *ops;
964 int hdrlen;
965};
966
967static int genl_start(struct netlink_callback *cb)
968{
969 struct genl_start_context *ctx = cb->data;
970 const struct genl_split_ops *ops;
971 struct genl_dumpit_info *info;
972 struct nlattr **attrs = NULL;
973 int rc = 0;
974
975 ops = ctx->ops;
976 if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
977 ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
978 return -EINVAL;
979
980 attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
981 ops, ctx->hdrlen,
982 GENL_DONT_VALIDATE_DUMP_STRICT);
983 if (IS_ERR(attrs))
984 return PTR_ERR(attrs);
985
986 info = genl_dumpit_info_alloc();
987 if (!info) {
988 genl_family_rcv_msg_attrs_free(attrs);
989 return -ENOMEM;
990 }
991 info->op = *ops;
992 info->info.family = ctx->family;
993 info->info.snd_seq = cb->nlh->nlmsg_seq;
994 info->info.snd_portid = NETLINK_CB(cb->skb).portid;
995 info->info.nlhdr = cb->nlh;
996 info->info.genlhdr = nlmsg_data(cb->nlh);
997 info->info.attrs = attrs;
998 genl_info_net_set(&info->info, sock_net(cb->skb->sk));
999 info->info.extack = cb->extack;
1000 memset(&info->info.ctx, 0, sizeof(info->info.ctx));
1001
1002 cb->data = info;
1003 if (ops->start) {
1004 genl_op_lock(ctx->family);
1005 rc = ops->start(cb);
1006 genl_op_unlock(ctx->family);
1007 }
1008
1009 if (rc) {
1010 genl_family_rcv_msg_attrs_free(info->info.attrs);
1011 genl_dumpit_info_free(info);
1012 cb->data = NULL;
1013 }
1014 return rc;
1015}
1016
1017static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1018{
1019 struct genl_dumpit_info *dump_info = cb->data;
1020 const struct genl_split_ops *ops = &dump_info->op;
1021 struct genl_info *info = &dump_info->info;
1022 int rc;
1023
1024 info->extack = cb->extack;
1025
1026 genl_op_lock(info->family);
1027 rc = ops->dumpit(skb, cb);
1028 genl_op_unlock(info->family);
1029 return rc;
1030}
1031
1032static int genl_done(struct netlink_callback *cb)
1033{
1034 struct genl_dumpit_info *dump_info = cb->data;
1035 const struct genl_split_ops *ops = &dump_info->op;
1036 struct genl_info *info = &dump_info->info;
1037 int rc = 0;
1038
1039 info->extack = cb->extack;
1040
1041 if (ops->done) {
1042 genl_op_lock(info->family);
1043 rc = ops->done(cb);
1044 genl_op_unlock(info->family);
1045 }
1046 genl_family_rcv_msg_attrs_free(info->attrs);
1047 genl_dumpit_info_free(dump_info);
1048 return rc;
1049}
1050
1051static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
1052 struct sk_buff *skb,
1053 struct nlmsghdr *nlh,
1054 struct netlink_ext_ack *extack,
1055 const struct genl_split_ops *ops,
1056 int hdrlen, struct net *net)
1057{
1058 struct genl_start_context ctx;
1059 struct netlink_dump_control c = {
1060 .module = family->module,
1061 .data = &ctx,
1062 .start = genl_start,
1063 .dump = genl_dumpit,
1064 .done = genl_done,
1065 .extack = extack,
1066 };
1067 int err;
1068
1069 ctx.family = family;
1070 ctx.nlh = nlh;
1071 ctx.extack = extack;
1072 ctx.ops = ops;
1073 ctx.hdrlen = hdrlen;
1074
1075 genl_op_unlock(family);
1076 err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1077 genl_op_lock(family);
1078
1079 return err;
1080}
1081
1082static int genl_family_rcv_msg_doit(const struct genl_family *family,
1083 struct sk_buff *skb,
1084 struct nlmsghdr *nlh,
1085 struct netlink_ext_ack *extack,
1086 const struct genl_split_ops *ops,
1087 int hdrlen, struct net *net)
1088{
1089 struct nlattr **attrbuf;
1090 struct genl_info info;
1091 int err;
1092
1093 attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
1094 ops, hdrlen,
1095 GENL_DONT_VALIDATE_STRICT);
1096 if (IS_ERR(attrbuf))
1097 return PTR_ERR(attrbuf);
1098
1099 info.snd_seq = nlh->nlmsg_seq;
1100 info.snd_portid = NETLINK_CB(skb).portid;
1101 info.family = family;
1102 info.nlhdr = nlh;
1103 info.genlhdr = nlmsg_data(nlh);
1104 info.attrs = attrbuf;
1105 info.extack = extack;
1106 genl_info_net_set(&info, net);
1107 memset(&info.ctx, 0, sizeof(info.ctx));
1108
1109 if (ops->pre_doit) {
1110 err = ops->pre_doit(ops, skb, &info);
1111 if (err)
1112 goto out;
1113 }
1114
1115 err = ops->doit(skb, &info);
1116
1117 if (ops->post_doit)
1118 ops->post_doit(ops, skb, &info);
1119
1120out:
1121 genl_family_rcv_msg_attrs_free(attrbuf);
1122
1123 return err;
1124}
1125
1126static int genl_header_check(const struct genl_family *family,
1127 struct nlmsghdr *nlh, struct genlmsghdr *hdr,
1128 struct netlink_ext_ack *extack)
1129{
1130 u16 flags;
1131
1132 /* Only for commands added after we started validating */
1133 if (hdr->cmd < family->resv_start_op)
1134 return 0;
1135
1136 if (hdr->reserved) {
1137 NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
1138 return -EINVAL;
1139 }
1140
1141 /* Old netlink flags have pretty loose semantics, allow only the flags
1142 * consumed by the core where we can enforce the meaning.
1143 */
1144 flags = nlh->nlmsg_flags;
1145 if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
1146 flags &= ~NLM_F_DUMP;
1147 if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
1148 NL_SET_ERR_MSG(extack,
1149 "ambiguous or reserved bits set in nlmsg_flags");
1150 return -EINVAL;
1151 }
1152
1153 return 0;
1154}
1155
1156static int genl_family_rcv_msg(const struct genl_family *family,
1157 struct sk_buff *skb,
1158 struct nlmsghdr *nlh,
1159 struct netlink_ext_ack *extack)
1160{
1161 struct net *net = sock_net(skb->sk);
1162 struct genlmsghdr *hdr = nlmsg_data(nlh);
1163 struct genl_split_ops op;
1164 int hdrlen;
1165 u8 flags;
1166
1167 /* this family doesn't exist in this netns */
1168 if (!family->netnsok && !net_eq(net, &init_net))
1169 return -ENOENT;
1170
1171 hdrlen = GENL_HDRLEN + family->hdrsize;
1172 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
1173 return -EINVAL;
1174
1175 if (genl_header_check(family, nlh, hdr, extack))
1176 return -EINVAL;
1177
1178 flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
1179 GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
1180 if (genl_get_cmd(hdr->cmd, flags, family, &op))
1181 return -EOPNOTSUPP;
1182
1183 if ((op.flags & GENL_ADMIN_PERM) &&
1184 !netlink_capable(skb, CAP_NET_ADMIN))
1185 return -EPERM;
1186
1187 if ((op.flags & GENL_UNS_ADMIN_PERM) &&
1188 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1189 return -EPERM;
1190
1191 if (flags & GENL_CMD_CAP_DUMP)
1192 return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
1193 &op, hdrlen, net);
1194 else
1195 return genl_family_rcv_msg_doit(family, skb, nlh, extack,
1196 &op, hdrlen, net);
1197}
1198
1199static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1200 struct netlink_ext_ack *extack)
1201{
1202 const struct genl_family *family;
1203 int err;
1204
1205 family = genl_family_find_byid(nlh->nlmsg_type);
1206 if (family == NULL)
1207 return -ENOENT;
1208
1209 genl_op_lock(family);
1210 err = genl_family_rcv_msg(family, skb, nlh, extack);
1211 genl_op_unlock(family);
1212
1213 return err;
1214}
1215
1216static void genl_rcv(struct sk_buff *skb)
1217{
1218 down_read(&cb_lock);
1219 netlink_rcv_skb(skb, &genl_rcv_msg);
1220 up_read(&cb_lock);
1221}
1222
1223/**************************************************************************
1224 * Controller
1225 **************************************************************************/
1226
1227static struct genl_family genl_ctrl;
1228
1229static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
1230 u32 flags, struct sk_buff *skb, u8 cmd)
1231{
1232 struct genl_op_iter i;
1233 void *hdr;
1234
1235 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1236 if (hdr == NULL)
1237 return -EMSGSIZE;
1238
1239 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1240 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
1241 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
1242 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
1243 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
1244 goto nla_put_failure;
1245
1246 if (genl_op_iter_init(family, &i)) {
1247 struct nlattr *nla_ops;
1248
1249 nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
1250 if (nla_ops == NULL)
1251 goto nla_put_failure;
1252
1253 while (genl_op_iter_next(&i)) {
1254 struct nlattr *nest;
1255 u32 op_flags;
1256
1257 op_flags = i.flags;
1258 if (i.doit.policy || i.dumpit.policy)
1259 op_flags |= GENL_CMD_CAP_HASPOL;
1260
1261 nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
1262 if (nest == NULL)
1263 goto nla_put_failure;
1264
1265 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
1266 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
1267 goto nla_put_failure;
1268
1269 nla_nest_end(skb, nest);
1270 }
1271
1272 nla_nest_end(skb, nla_ops);
1273 }
1274
1275 if (family->n_mcgrps) {
1276 struct nlattr *nla_grps;
1277 int i;
1278
1279 nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1280 if (nla_grps == NULL)
1281 goto nla_put_failure;
1282
1283 for (i = 0; i < family->n_mcgrps; i++) {
1284 struct nlattr *nest;
1285 const struct genl_multicast_group *grp;
1286
1287 grp = &family->mcgrps[i];
1288
1289 nest = nla_nest_start_noflag(skb, i + 1);
1290 if (nest == NULL)
1291 goto nla_put_failure;
1292
1293 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
1294 family->mcgrp_offset + i) ||
1295 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1296 grp->name))
1297 goto nla_put_failure;
1298
1299 nla_nest_end(skb, nest);
1300 }
1301 nla_nest_end(skb, nla_grps);
1302 }
1303
1304 genlmsg_end(skb, hdr);
1305 return 0;
1306
1307nla_put_failure:
1308 genlmsg_cancel(skb, hdr);
1309 return -EMSGSIZE;
1310}
1311
1312static int ctrl_fill_mcgrp_info(const struct genl_family *family,
1313 const struct genl_multicast_group *grp,
1314 int grp_id, u32 portid, u32 seq, u32 flags,
1315 struct sk_buff *skb, u8 cmd)
1316{
1317 void *hdr;
1318 struct nlattr *nla_grps;
1319 struct nlattr *nest;
1320
1321 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1322 if (hdr == NULL)
1323 return -1;
1324
1325 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1326 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
1327 goto nla_put_failure;
1328
1329 nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1330 if (nla_grps == NULL)
1331 goto nla_put_failure;
1332
1333 nest = nla_nest_start_noflag(skb, 1);
1334 if (nest == NULL)
1335 goto nla_put_failure;
1336
1337 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
1338 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1339 grp->name))
1340 goto nla_put_failure;
1341
1342 nla_nest_end(skb, nest);
1343 nla_nest_end(skb, nla_grps);
1344
1345 genlmsg_end(skb, hdr);
1346 return 0;
1347
1348nla_put_failure:
1349 genlmsg_cancel(skb, hdr);
1350 return -EMSGSIZE;
1351}
1352
1353static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
1354{
1355 int n = 0;
1356 struct genl_family *rt;
1357 struct net *net = sock_net(skb->sk);
1358 int fams_to_skip = cb->args[0];
1359 unsigned int id;
1360 int err = 0;
1361
1362 idr_for_each_entry(&genl_fam_idr, rt, id) {
1363 if (!rt->netnsok && !net_eq(net, &init_net))
1364 continue;
1365
1366 if (n++ < fams_to_skip)
1367 continue;
1368
1369 err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
1370 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1371 skb, CTRL_CMD_NEWFAMILY);
1372 if (err) {
1373 n--;
1374 break;
1375 }
1376 }
1377
1378 cb->args[0] = n;
1379 return err;
1380}
1381
1382static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
1383 u32 portid, int seq, u8 cmd)
1384{
1385 struct sk_buff *skb;
1386 int err;
1387
1388 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1389 if (skb == NULL)
1390 return ERR_PTR(-ENOBUFS);
1391
1392 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
1393 if (err < 0) {
1394 nlmsg_free(skb);
1395 return ERR_PTR(err);
1396 }
1397
1398 return skb;
1399}
1400
1401static struct sk_buff *
1402ctrl_build_mcgrp_msg(const struct genl_family *family,
1403 const struct genl_multicast_group *grp,
1404 int grp_id, u32 portid, int seq, u8 cmd)
1405{
1406 struct sk_buff *skb;
1407 int err;
1408
1409 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1410 if (skb == NULL)
1411 return ERR_PTR(-ENOBUFS);
1412
1413 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
1414 seq, 0, skb, cmd);
1415 if (err < 0) {
1416 nlmsg_free(skb);
1417 return ERR_PTR(err);
1418 }
1419
1420 return skb;
1421}
1422
1423static const struct nla_policy ctrl_policy_family[] = {
1424 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
1425 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
1426 .len = GENL_NAMSIZ - 1 },
1427};
1428
1429static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
1430{
1431 struct sk_buff *msg;
1432 const struct genl_family *res = NULL;
1433 int err = -EINVAL;
1434
1435 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
1436 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
1437 res = genl_family_find_byid(id);
1438 err = -ENOENT;
1439 }
1440
1441 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
1442 char *name;
1443
1444 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
1445 res = genl_family_find_byname(name);
1446#ifdef CONFIG_MODULES
1447 if (res == NULL) {
1448 genl_unlock();
1449 up_read(&cb_lock);
1450 request_module("net-pf-%d-proto-%d-family-%s",
1451 PF_NETLINK, NETLINK_GENERIC, name);
1452 down_read(&cb_lock);
1453 genl_lock();
1454 res = genl_family_find_byname(name);
1455 }
1456#endif
1457 err = -ENOENT;
1458 }
1459
1460 if (res == NULL)
1461 return err;
1462
1463 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
1464 /* family doesn't exist here */
1465 return -ENOENT;
1466 }
1467
1468 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
1469 CTRL_CMD_NEWFAMILY);
1470 if (IS_ERR(msg))
1471 return PTR_ERR(msg);
1472
1473 return genlmsg_reply(msg, info);
1474}
1475
1476static int genl_ctrl_event(int event, const struct genl_family *family,
1477 const struct genl_multicast_group *grp,
1478 int grp_id)
1479{
1480 struct sk_buff *msg;
1481
1482 /* genl is still initialising */
1483 if (!init_net.genl_sock)
1484 return 0;
1485
1486 switch (event) {
1487 case CTRL_CMD_NEWFAMILY:
1488 case CTRL_CMD_DELFAMILY:
1489 WARN_ON(grp);
1490 msg = ctrl_build_family_msg(family, 0, 0, event);
1491 break;
1492 case CTRL_CMD_NEWMCAST_GRP:
1493 case CTRL_CMD_DELMCAST_GRP:
1494 BUG_ON(!grp);
1495 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
1496 break;
1497 default:
1498 return -EINVAL;
1499 }
1500
1501 if (IS_ERR(msg))
1502 return PTR_ERR(msg);
1503
1504 if (!family->netnsok)
1505 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
1506 0, GFP_KERNEL);
1507 else
1508 genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
1509
1510 return 0;
1511}
1512
1513struct ctrl_dump_policy_ctx {
1514 struct netlink_policy_dump_state *state;
1515 const struct genl_family *rt;
1516 struct genl_op_iter *op_iter;
1517 u32 op;
1518 u16 fam_id;
1519 u8 dump_map:1,
1520 single_op:1;
1521};
1522
1523static const struct nla_policy ctrl_policy_policy[] = {
1524 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
1525 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
1526 .len = GENL_NAMSIZ - 1 },
1527 [CTRL_ATTR_OP] = { .type = NLA_U32 },
1528};
1529
1530static int ctrl_dumppolicy_start(struct netlink_callback *cb)
1531{
1532 const struct genl_dumpit_info *info = genl_dumpit_info(cb);
1533 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1534 struct nlattr **tb = info->info.attrs;
1535 const struct genl_family *rt;
1536 struct genl_op_iter i;
1537 int err;
1538
1539 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
1540
1541 if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
1542 return -EINVAL;
1543
1544 if (tb[CTRL_ATTR_FAMILY_ID]) {
1545 ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
1546 } else {
1547 rt = genl_family_find_byname(
1548 nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
1549 if (!rt)
1550 return -ENOENT;
1551 ctx->fam_id = rt->id;
1552 }
1553
1554 rt = genl_family_find_byid(ctx->fam_id);
1555 if (!rt)
1556 return -ENOENT;
1557
1558 ctx->rt = rt;
1559
1560 if (tb[CTRL_ATTR_OP]) {
1561 struct genl_split_ops doit, dump;
1562
1563 ctx->single_op = true;
1564 ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
1565
1566 err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
1567 if (err) {
1568 NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
1569 return err;
1570 }
1571
1572 if (doit.policy) {
1573 err = netlink_policy_dump_add_policy(&ctx->state,
1574 doit.policy,
1575 doit.maxattr);
1576 if (err)
1577 goto err_free_state;
1578 }
1579 if (dump.policy) {
1580 err = netlink_policy_dump_add_policy(&ctx->state,
1581 dump.policy,
1582 dump.maxattr);
1583 if (err)
1584 goto err_free_state;
1585 }
1586
1587 if (!ctx->state)
1588 return -ENODATA;
1589
1590 ctx->dump_map = 1;
1591 return 0;
1592 }
1593
1594 ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
1595 if (!ctx->op_iter)
1596 return -ENOMEM;
1597
1598 genl_op_iter_init(rt, ctx->op_iter);
1599 ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1600
1601 for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
1602 if (i.doit.policy) {
1603 err = netlink_policy_dump_add_policy(&ctx->state,
1604 i.doit.policy,
1605 i.doit.maxattr);
1606 if (err)
1607 goto err_free_state;
1608 }
1609 if (i.dumpit.policy) {
1610 err = netlink_policy_dump_add_policy(&ctx->state,
1611 i.dumpit.policy,
1612 i.dumpit.maxattr);
1613 if (err)
1614 goto err_free_state;
1615 }
1616 }
1617
1618 if (!ctx->state) {
1619 err = -ENODATA;
1620 goto err_free_op_iter;
1621 }
1622 return 0;
1623
1624err_free_state:
1625 netlink_policy_dump_free(ctx->state);
1626err_free_op_iter:
1627 kfree(ctx->op_iter);
1628 return err;
1629}
1630
1631static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
1632 struct netlink_callback *cb)
1633{
1634 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1635 void *hdr;
1636
1637 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1638 cb->nlh->nlmsg_seq, &genl_ctrl,
1639 NLM_F_MULTI, CTRL_CMD_GETPOLICY);
1640 if (!hdr)
1641 return NULL;
1642
1643 if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
1644 return NULL;
1645
1646 return hdr;
1647}
1648
1649static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
1650 struct netlink_callback *cb,
1651 struct genl_split_ops *doit,
1652 struct genl_split_ops *dumpit)
1653{
1654 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1655 struct nlattr *nest_pol, *nest_op;
1656 void *hdr;
1657 int idx;
1658
1659 /* skip if we have nothing to show */
1660 if (!doit->policy && !dumpit->policy)
1661 return 0;
1662
1663 hdr = ctrl_dumppolicy_prep(skb, cb);
1664 if (!hdr)
1665 return -ENOBUFS;
1666
1667 nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
1668 if (!nest_pol)
1669 goto err;
1670
1671 nest_op = nla_nest_start(skb, doit->cmd);
1672 if (!nest_op)
1673 goto err;
1674
1675 if (doit->policy) {
1676 idx = netlink_policy_dump_get_policy_idx(ctx->state,
1677 doit->policy,
1678 doit->maxattr);
1679
1680 if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
1681 goto err;
1682 }
1683 if (dumpit->policy) {
1684 idx = netlink_policy_dump_get_policy_idx(ctx->state,
1685 dumpit->policy,
1686 dumpit->maxattr);
1687
1688 if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
1689 goto err;
1690 }
1691
1692 nla_nest_end(skb, nest_op);
1693 nla_nest_end(skb, nest_pol);
1694 genlmsg_end(skb, hdr);
1695
1696 return 0;
1697err:
1698 genlmsg_cancel(skb, hdr);
1699 return -ENOBUFS;
1700}
1701
1702static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
1703{
1704 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1705 void *hdr;
1706
1707 if (ctx->dump_map) {
1708 if (ctx->single_op) {
1709 struct genl_split_ops doit, dumpit;
1710
1711 if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
1712 &doit, &dumpit)))
1713 return -ENOENT;
1714
1715 if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
1716 return skb->len;
1717
1718 /* done with the per-op policy index list */
1719 ctx->dump_map = 0;
1720 }
1721
1722 while (ctx->dump_map) {
1723 if (ctrl_dumppolicy_put_op(skb, cb,
1724 &ctx->op_iter->doit,
1725 &ctx->op_iter->dumpit))
1726 return skb->len;
1727
1728 ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1729 }
1730 }
1731
1732 while (netlink_policy_dump_loop(ctx->state)) {
1733 struct nlattr *nest;
1734
1735 hdr = ctrl_dumppolicy_prep(skb, cb);
1736 if (!hdr)
1737 goto nla_put_failure;
1738
1739 nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
1740 if (!nest)
1741 goto nla_put_failure;
1742
1743 if (netlink_policy_dump_write(skb, ctx->state))
1744 goto nla_put_failure;
1745
1746 nla_nest_end(skb, nest);
1747
1748 genlmsg_end(skb, hdr);
1749 }
1750
1751 return skb->len;
1752
1753nla_put_failure:
1754 genlmsg_cancel(skb, hdr);
1755 return skb->len;
1756}
1757
1758static int ctrl_dumppolicy_done(struct netlink_callback *cb)
1759{
1760 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1761
1762 kfree(ctx->op_iter);
1763 netlink_policy_dump_free(ctx->state);
1764 return 0;
1765}
1766
1767static const struct genl_split_ops genl_ctrl_ops[] = {
1768 {
1769 .cmd = CTRL_CMD_GETFAMILY,
1770 .validate = GENL_DONT_VALIDATE_STRICT,
1771 .policy = ctrl_policy_family,
1772 .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
1773 .doit = ctrl_getfamily,
1774 .flags = GENL_CMD_CAP_DO,
1775 },
1776 {
1777 .cmd = CTRL_CMD_GETFAMILY,
1778 .validate = GENL_DONT_VALIDATE_DUMP,
1779 .policy = ctrl_policy_family,
1780 .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
1781 .dumpit = ctrl_dumpfamily,
1782 .flags = GENL_CMD_CAP_DUMP,
1783 },
1784 {
1785 .cmd = CTRL_CMD_GETPOLICY,
1786 .policy = ctrl_policy_policy,
1787 .maxattr = ARRAY_SIZE(ctrl_policy_policy) - 1,
1788 .start = ctrl_dumppolicy_start,
1789 .dumpit = ctrl_dumppolicy,
1790 .done = ctrl_dumppolicy_done,
1791 .flags = GENL_CMD_CAP_DUMP,
1792 },
1793};
1794
1795static const struct genl_multicast_group genl_ctrl_groups[] = {
1796 { .name = "notify", },
1797};
1798
1799static struct genl_family genl_ctrl __ro_after_init = {
1800 .module = THIS_MODULE,
1801 .split_ops = genl_ctrl_ops,
1802 .n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
1803 .resv_start_op = CTRL_CMD_GETPOLICY + 1,
1804 .mcgrps = genl_ctrl_groups,
1805 .n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
1806 .id = GENL_ID_CTRL,
1807 .name = "nlctrl",
1808 .version = 0x2,
1809 .netnsok = true,
1810};
1811
1812static int genl_bind(struct net *net, int group)
1813{
1814 const struct genl_family *family;
1815 unsigned int id;
1816 int ret = 0;
1817
1818 down_read(&cb_lock);
1819
1820 idr_for_each_entry(&genl_fam_idr, family, id) {
1821 const struct genl_multicast_group *grp;
1822 int i;
1823
1824 if (family->n_mcgrps == 0)
1825 continue;
1826
1827 i = group - family->mcgrp_offset;
1828 if (i < 0 || i >= family->n_mcgrps)
1829 continue;
1830
1831 grp = &family->mcgrps[i];
1832 if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
1833 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1834 ret = -EPERM;
1835 if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
1836 !ns_capable(net->user_ns, CAP_SYS_ADMIN))
1837 ret = -EPERM;
1838
1839 if (family->bind)
1840 family->bind(i);
1841
1842 break;
1843 }
1844
1845 up_read(&cb_lock);
1846 return ret;
1847}
1848
1849static void genl_unbind(struct net *net, int group)
1850{
1851 const struct genl_family *family;
1852 unsigned int id;
1853
1854 down_read(&cb_lock);
1855
1856 idr_for_each_entry(&genl_fam_idr, family, id) {
1857 int i;
1858
1859 if (family->n_mcgrps == 0)
1860 continue;
1861
1862 i = group - family->mcgrp_offset;
1863 if (i < 0 || i >= family->n_mcgrps)
1864 continue;
1865
1866 if (family->unbind)
1867 family->unbind(i);
1868
1869 break;
1870 }
1871
1872 up_read(&cb_lock);
1873}
1874
1875static int __net_init genl_pernet_init(struct net *net)
1876{
1877 struct netlink_kernel_cfg cfg = {
1878 .input = genl_rcv,
1879 .flags = NL_CFG_F_NONROOT_RECV,
1880 .bind = genl_bind,
1881 .unbind = genl_unbind,
1882 .release = genl_release,
1883 };
1884
1885 /* we'll bump the group number right afterwards */
1886 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1887
1888 if (!net->genl_sock && net_eq(net, &init_net))
1889 panic("GENL: Cannot initialize generic netlink\n");
1890
1891 if (!net->genl_sock)
1892 return -ENOMEM;
1893
1894 return 0;
1895}
1896
1897static void __net_exit genl_pernet_exit(struct net *net)
1898{
1899 netlink_kernel_release(net->genl_sock);
1900 net->genl_sock = NULL;
1901}
1902
1903static struct pernet_operations genl_pernet_ops = {
1904 .init = genl_pernet_init,
1905 .exit = genl_pernet_exit,
1906};
1907
1908static int __init genl_init(void)
1909{
1910 int err;
1911
1912 err = genl_register_family(&genl_ctrl);
1913 if (err < 0)
1914 goto problem;
1915
1916 err = register_pernet_subsys(&genl_pernet_ops);
1917 if (err)
1918 goto problem;
1919
1920 return 0;
1921
1922problem:
1923 panic("GENL: Cannot register controller: %d\n", err);
1924}
1925
1926core_initcall(genl_init);
1927
1928static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
1929{
1930 struct sk_buff *tmp;
1931 struct net *net, *prev = NULL;
1932 bool delivered = false;
1933 int err;
1934
1935 rcu_read_lock();
1936 for_each_net_rcu(net) {
1937 if (prev) {
1938 tmp = skb_clone(skb, GFP_ATOMIC);
1939 if (!tmp) {
1940 err = -ENOMEM;
1941 goto error;
1942 }
1943 err = nlmsg_multicast(prev->genl_sock, tmp,
1944 portid, group, GFP_ATOMIC);
1945 if (!err)
1946 delivered = true;
1947 else if (err != -ESRCH)
1948 goto error;
1949 }
1950
1951 prev = net;
1952 }
1953 err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
1954
1955 rcu_read_unlock();
1956
1957 if (!err)
1958 delivered = true;
1959 else if (err != -ESRCH)
1960 return err;
1961 return delivered ? 0 : -ESRCH;
1962 error:
1963 rcu_read_unlock();
1964
1965 kfree_skb(skb);
1966 return err;
1967}
1968
1969int genlmsg_multicast_allns(const struct genl_family *family,
1970 struct sk_buff *skb, u32 portid,
1971 unsigned int group)
1972{
1973 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1974 return -EINVAL;
1975
1976 group = family->mcgrp_offset + group;
1977 return genlmsg_mcast(skb, portid, group);
1978}
1979EXPORT_SYMBOL(genlmsg_multicast_allns);
1980
1981void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1982 struct genl_info *info, u32 group, gfp_t flags)
1983{
1984 struct net *net = genl_info_net(info);
1985 struct sock *sk = net->genl_sock;
1986
1987 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1988 return;
1989
1990 group = family->mcgrp_offset + group;
1991 nlmsg_notify(sk, skb, info->snd_portid, group,
1992 nlmsg_report(info->nlhdr), flags);
1993}
1994EXPORT_SYMBOL(genl_notify);
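
/*
 * Illustrative sketch (not part of the original file): sending a multicast
 * notification to one of a family's groups.  "demo_family", "DEMO_CMD_EVENT",
 * "DEMO_ATTR_VALUE" and "DEMO_MCGRP_EVENTS" are hypothetical.  Note that the
 * group passed to genlmsg_multicast()/genl_notify() is the index within the
 * family's mcgrps array; the core adds family->mcgrp_offset to obtain the
 * real netlink group number.
 *
 *	struct sk_buff *msg;
 *	void *hdr;
 *
 *	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	hdr = genlmsg_put(msg, 0, 0, &demo_family, 0, DEMO_CMD_EVENT);
 *	if (!hdr || nla_put_u32(msg, DEMO_ATTR_VALUE, 42)) {
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 *	}
 *	genlmsg_end(msg, hdr);
 *
 *	return genlmsg_multicast(&demo_family, msg, 0, DEMO_MCGRP_EVENTS,
 *				 GFP_KERNEL);
 */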
1/*
2 * NETLINK Generic Netlink Family
3 *
4 * Authors: Jamal Hadi Salim
5 * Thomas Graf <tgraf@suug.ch>
6 * Johannes Berg <johannes@sipsolutions.net>
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/errno.h>
13#include <linux/types.h>
14#include <linux/socket.h>
15#include <linux/string.h>
16#include <linux/skbuff.h>
17#include <linux/mutex.h>
18#include <linux/bitmap.h>
19#include <net/sock.h>
20#include <net/genetlink.h>
21
22static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
23
24void genl_lock(void)
25{
26 mutex_lock(&genl_mutex);
27}
28EXPORT_SYMBOL(genl_lock);
29
30void genl_unlock(void)
31{
32 mutex_unlock(&genl_mutex);
33}
34EXPORT_SYMBOL(genl_unlock);
35
36#ifdef CONFIG_PROVE_LOCKING
37int lockdep_genl_is_held(void)
38{
39 return lockdep_is_held(&genl_mutex);
40}
41EXPORT_SYMBOL(lockdep_genl_is_held);
42#endif
43
44#define GENL_FAM_TAB_SIZE 16
45#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
46
47static struct list_head family_ht[GENL_FAM_TAB_SIZE];
48/*
49 * Bitmap of multicast groups that are currently in use.
50 *
51 * To avoid an allocation at boot of just one unsigned long,
52 * declare it global instead.
53 * Bit 0 is marked as already used since group 0 is invalid.
54 */
55static unsigned long mc_group_start = 0x1;
56static unsigned long *mc_groups = &mc_group_start;
57static unsigned long mc_groups_longs = 1;
58
59static int genl_ctrl_event(int event, void *data);
60
61static inline unsigned int genl_family_hash(unsigned int id)
62{
63 return id & GENL_FAM_TAB_MASK;
64}
65
66static inline struct list_head *genl_family_chain(unsigned int id)
67{
68 return &family_ht[genl_family_hash(id)];
69}
70
71static struct genl_family *genl_family_find_byid(unsigned int id)
72{
73 struct genl_family *f;
74
75 list_for_each_entry(f, genl_family_chain(id), family_list)
76 if (f->id == id)
77 return f;
78
79 return NULL;
80}
81
82static struct genl_family *genl_family_find_byname(char *name)
83{
84 struct genl_family *f;
85 int i;
86
87 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
88 list_for_each_entry(f, genl_family_chain(i), family_list)
89 if (strcmp(f->name, name) == 0)
90 return f;
91
92 return NULL;
93}
94
95static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
96{
97 struct genl_ops *ops;
98
99 list_for_each_entry(ops, &family->ops_list, ops_list)
100 if (ops->cmd == cmd)
101 return ops;
102
103 return NULL;
104}
105
106/* Of course we are going to have problems once we hit
107 * 2^16 alive types, but that can only happen by year 2K
108*/
109static u16 genl_generate_id(void)
110{
111 static u16 id_gen_idx = GENL_MIN_ID;
112 int i;
113
114 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
115 if (!genl_family_find_byid(id_gen_idx))
116 return id_gen_idx;
117 if (++id_gen_idx > GENL_MAX_ID)
118 id_gen_idx = GENL_MIN_ID;
119 }
120
121 return 0;
122}
123
124static struct genl_multicast_group notify_grp;
125
126/**
127 * genl_register_mc_group - register a multicast group
128 *
129 * Registers the specified multicast group and notifies userspace
130 * about the new group.
131 *
132 * Returns 0 on success or a negative error code.
133 *
134 * @family: The generic netlink family the group shall be registered for.
135 * @grp: The group to register, must have a name.
136 */
137int genl_register_mc_group(struct genl_family *family,
138 struct genl_multicast_group *grp)
139{
140 int id;
141 unsigned long *new_groups;
142 int err = 0;
143
144 BUG_ON(grp->name[0] == '\0');
145
146 genl_lock();
147
148 /* special-case our own group */
149 if (grp == ¬ify_grp)
150 id = GENL_ID_CTRL;
151 else
152 id = find_first_zero_bit(mc_groups,
153 mc_groups_longs * BITS_PER_LONG);
154
155
156 if (id >= mc_groups_longs * BITS_PER_LONG) {
157 size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long);
158
159 if (mc_groups == &mc_group_start) {
160 new_groups = kzalloc(nlen, GFP_KERNEL);
161 if (!new_groups) {
162 err = -ENOMEM;
163 goto out;
164 }
165 mc_groups = new_groups;
166 *mc_groups = mc_group_start;
167 } else {
168 new_groups = krealloc(mc_groups, nlen, GFP_KERNEL);
169 if (!new_groups) {
170 err = -ENOMEM;
171 goto out;
172 }
173 mc_groups = new_groups;
174 mc_groups[mc_groups_longs] = 0;
175 }
176 mc_groups_longs++;
177 }
178
179 if (family->netnsok) {
180 struct net *net;
181
182 netlink_table_grab();
183 rcu_read_lock();
184 for_each_net_rcu(net) {
185 err = __netlink_change_ngroups(net->genl_sock,
186 mc_groups_longs * BITS_PER_LONG);
187 if (err) {
188 /*
189 * No need to roll back, can only fail if
190 * memory allocation fails and then the
191 * number of _possible_ groups has been
192 * increased on some sockets which is ok.
193 */
194 rcu_read_unlock();
195 netlink_table_ungrab();
196 goto out;
197 }
198 }
199 rcu_read_unlock();
200 netlink_table_ungrab();
201 } else {
202 err = netlink_change_ngroups(init_net.genl_sock,
203 mc_groups_longs * BITS_PER_LONG);
204 if (err)
205 goto out;
206 }
207
208 grp->id = id;
209 set_bit(id, mc_groups);
210 list_add_tail(&grp->list, &family->mcast_groups);
211 grp->family = family;
212
213 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
214 out:
215 genl_unlock();
216 return err;
217}
218EXPORT_SYMBOL(genl_register_mc_group);
219
220static void __genl_unregister_mc_group(struct genl_family *family,
221 struct genl_multicast_group *grp)
222{
223 struct net *net;
224 BUG_ON(grp->family != family);
225
226 netlink_table_grab();
227 rcu_read_lock();
228 for_each_net_rcu(net)
229 __netlink_clear_multicast_users(net->genl_sock, grp->id);
230 rcu_read_unlock();
231 netlink_table_ungrab();
232
233 clear_bit(grp->id, mc_groups);
234 list_del(&grp->list);
235 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
236 grp->id = 0;
237 grp->family = NULL;
238}
239
240/**
241 * genl_unregister_mc_group - unregister a multicast group
242 *
243 * Unregisters the specified multicast group and notifies userspace
244 * about it. All current listeners on the group are removed.
245 *
246 * Note: It is not necessary to unregister all multicast groups before
247 * unregistering the family, unregistering the family will cause
248 * all assigned multicast groups to be unregistered automatically.
249 *
250 * @family: Generic netlink family the group belongs to.
251 * @grp: The group to unregister, must have been registered successfully
252 * previously.
253 */
254void genl_unregister_mc_group(struct genl_family *family,
255 struct genl_multicast_group *grp)
256{
257 genl_lock();
258 __genl_unregister_mc_group(family, grp);
259 genl_unlock();
260}
261EXPORT_SYMBOL(genl_unregister_mc_group);
262
263static void genl_unregister_mc_groups(struct genl_family *family)
264{
265 struct genl_multicast_group *grp, *tmp;
266
267 list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
268 __genl_unregister_mc_group(family, grp);
269}
270
271/**
272 * genl_register_ops - register generic netlink operations
273 * @family: generic netlink family
274 * @ops: operations to be registered
275 *
276 * Registers the specified operations and assigns them to the specified
277 * family. Either a doit or dumpit callback must be specified or the
278 * operation will fail. Only one operation structure per command
279 * identifier may be registered.
280 *
281 * See include/net/genetlink.h for more documentation on the operations
282 * structure.
283 *
284 * Returns 0 on success or a negative error code.
285 */
286int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
287{
288 int err = -EINVAL;
289
290 if (ops->dumpit == NULL && ops->doit == NULL)
291 goto errout;
292
293 if (genl_get_cmd(ops->cmd, family)) {
294 err = -EEXIST;
295 goto errout;
296 }
297
298 if (ops->dumpit)
299 ops->flags |= GENL_CMD_CAP_DUMP;
300 if (ops->doit)
301 ops->flags |= GENL_CMD_CAP_DO;
302 if (ops->policy)
303 ops->flags |= GENL_CMD_CAP_HASPOL;
304
305 genl_lock();
306 list_add_tail(&ops->ops_list, &family->ops_list);
307 genl_unlock();
308
309 genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
310 err = 0;
311errout:
312 return err;
313}
314EXPORT_SYMBOL(genl_register_ops);
315
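/*
 * Usage sketch (illustrative only; FOO_CMD_GET, foo_policy and
 * foo_get_doit are hypothetical): wiring up a single command for a
 * previously registered family.
 *
 *	static struct genl_ops foo_get_op = {
 *		.cmd	= FOO_CMD_GET,
 *		.policy	= foo_policy,
 *		.doit	= foo_get_doit,
 *		.flags	= GENL_ADMIN_PERM,
 *	};
 *
 *	err = genl_register_ops(&foo_family, &foo_get_op);
 *
 * The GENL_CMD_CAP_* bits are filled in by genl_register_ops() itself, so
 * callers only set permission flags such as GENL_ADMIN_PERM.
 */
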
316/**
317 * genl_unregister_ops - unregister generic netlink operations
318 * @family: generic netlink family
319 * @ops: operations to be unregistered
320 *
321 * Unregisters the specified operations and unassigns them from the
322 * specified family. The operation blocks until the current message
323 * processing has finished and doesn't start again until the
324 * unregister process has finished.
325 *
326 * Note: It is not necessary to unregister all operations before
327 * unregistering the family, unregistering the family will cause
328 * all assigned operations to be unregistered automatically.
329 *
330 * Returns 0 on success or a negative error code.
331 */
332int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
333{
334 struct genl_ops *rc;
335
336 genl_lock();
337 list_for_each_entry(rc, &family->ops_list, ops_list) {
338 if (rc == ops) {
339 list_del(&ops->ops_list);
340 genl_unlock();
341 genl_ctrl_event(CTRL_CMD_DELOPS, ops);
342 return 0;
343 }
344 }
345 genl_unlock();
346
347 return -ENOENT;
348}
349EXPORT_SYMBOL(genl_unregister_ops);
350
351/**
352 * genl_register_family - register a generic netlink family
353 * @family: generic netlink family
354 *
355 * Registers the specified family after validating it first. Only one
356 * family may be registered with the same family name or identifier.
357 * The family id may equal GENL_ID_GENERATE causing a unique id to
358 * be automatically generated and assigned.
359 *
360 * Return 0 on success or a negative error code.
361 */
362int genl_register_family(struct genl_family *family)
363{
364 int err = -EINVAL;
365
366 if (family->id && family->id < GENL_MIN_ID)
367 goto errout;
368
369 if (family->id > GENL_MAX_ID)
370 goto errout;
371
372 INIT_LIST_HEAD(&family->ops_list);
373 INIT_LIST_HEAD(&family->mcast_groups);
374
375 genl_lock();
376
377 if (genl_family_find_byname(family->name)) {
378 err = -EEXIST;
379 goto errout_locked;
380 }
381
382 if (family->id == GENL_ID_GENERATE) {
383 u16 newid = genl_generate_id();
384
385 if (!newid) {
386 err = -ENOMEM;
387 goto errout_locked;
388 }
389
390 family->id = newid;
391 } else if (genl_family_find_byid(family->id)) {
392 err = -EEXIST;
393 goto errout_locked;
394 }
395
396 if (family->maxattr) {
397 family->attrbuf = kmalloc((family->maxattr+1) *
398 sizeof(struct nlattr *), GFP_KERNEL);
399 if (family->attrbuf == NULL) {
400 err = -ENOMEM;
401 goto errout_locked;
402 }
403 } else
404 family->attrbuf = NULL;
405
406 list_add_tail(&family->family_list, genl_family_chain(family->id));
407 genl_unlock();
408
409 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family);
410
411 return 0;
412
413errout_locked:
414 genl_unlock();
415errout:
416 return err;
417}
418EXPORT_SYMBOL(genl_register_family);
419
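/*
 * Usage sketch (illustrative only; every foo_* name and number below is
 * hypothetical): a minimal family definition that lets the core pick the
 * id.
 *
 *	static struct genl_family foo_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.name		= "foo",
 *		.hdrsize	= 0,
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *		.netnsok	= true,
 *	};
 *
 *	err = genl_register_family(&foo_family);
 *
 * On success foo_family.id contains the assigned netlink message type and
 * the controller has announced the family with CTRL_CMD_NEWFAMILY.
 */
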
420/**
421 * genl_register_family_with_ops - register a generic netlink family
422 * @family: generic netlink family
423 * @ops: operations to be registered
424 * @n_ops: number of elements to register
425 *
426 * Registers the specified family and operations from the specified table.
427 * Only one family may be registered with the same family name or identifier.
428 *
429 * The family id may equal GENL_ID_GENERATE causing a unique id to
430 * be automatically generated and assigned.
431 *
432 * Either a doit or dumpit callback must be specified for every registered
433 * operation or the function will fail. Only one operation structure per
434 * command identifier may be registered.
435 *
436 * See include/net/genetlink.h for more documentation on the operations
437 * structure.
438 *
439 * This is equivalent to calling genl_register_family() followed by
440 * genl_register_ops() for every operation entry in the table, taking
441 * care to unregister the family on the error path.
442 *
443 * Return 0 on success or a negative error code.
444 */
445int genl_register_family_with_ops(struct genl_family *family,
446 struct genl_ops *ops, size_t n_ops)
447{
448 int err, i;
449
450 err = genl_register_family(family);
451 if (err)
452 return err;
453
454 for (i = 0; i < n_ops; ++i, ++ops) {
455 err = genl_register_ops(family, ops);
456 if (err)
457 goto err_out;
458 }
459 return 0;
460err_out:
461 genl_unregister_family(family);
462 return err;
463}
464EXPORT_SYMBOL(genl_register_family_with_ops);
465
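/*
 * Usage sketch (illustrative only; foo_ops[] and its handlers are
 * hypothetical): registering a family together with its ops table in one
 * call.
 *
 *	static struct genl_ops foo_ops[] = {
 *		{ .cmd = FOO_CMD_GET, .doit = foo_get_doit, .policy = foo_policy },
 *		{ .cmd = FOO_CMD_SET, .doit = foo_set_doit, .policy = foo_policy },
 *	};
 *
 *	err = genl_register_family_with_ops(&foo_family, foo_ops,
 *					    ARRAY_SIZE(foo_ops));
 *
 * Any per-op failure unregisters the family again, so callers only have a
 * single error path to handle.
 */
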
466/**
467 * genl_unregister_family - unregister generic netlink family
468 * @family: generic netlink family
469 *
470 * Unregisters the specified family.
471 *
472 * Returns 0 on success or a negative error code.
473 */
474int genl_unregister_family(struct genl_family *family)
475{
476 struct genl_family *rc;
477
478 genl_lock();
479
480 genl_unregister_mc_groups(family);
481
482 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
483 if (family->id != rc->id || strcmp(rc->name, family->name))
484 continue;
485
486 list_del(&rc->family_list);
487 INIT_LIST_HEAD(&family->ops_list);
488 genl_unlock();
489
490 kfree(family->attrbuf);
491 genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
492 return 0;
493 }
494
495 genl_unlock();
496
497 return -ENOENT;
498}
499EXPORT_SYMBOL(genl_unregister_family);
500
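/*
 * Usage sketch (illustrative only; foo_family is hypothetical): a module
 * exit path usually needs just this one call, since multicast groups and
 * ops are dropped together with the family.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		genl_unregister_family(&foo_family);
 *	}
 */
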
501/**
502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message
504 * @pid: netlink pid the message is addressed to
505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family
507 * @flags: netlink message flags
508 * @cmd: generic netlink command
509 *
510 * Returns pointer to user specific header
511 */
512void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd)
514{
515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr;
517
518 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags);
520 if (nlh == NULL)
521 return NULL;
522
523 hdr = nlmsg_data(nlh);
524 hdr->cmd = cmd;
525 hdr->version = family->version;
526 hdr->reserved = 0;
527
528 return (char *) hdr + GENL_HDRLEN;
529}
530EXPORT_SYMBOL(genlmsg_put);
531
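/*
 * Usage sketch (illustrative only; FOO_CMD_GET, FOO_ATTR_VALUE and "value"
 * are hypothetical): building a unicast reply from a doit handler.
 *
 *	struct sk_buff *msg;
 *	void *hdr;
 *
 *	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
 *			  &foo_family, 0, FOO_CMD_GET);
 *	if (!hdr)
 *		goto failure;
 *	if (nla_put_u32(msg, FOO_ATTR_VALUE, value))
 *		goto failure;
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 * failure:
 *	nlmsg_free(msg);
 *	return -EMSGSIZE;
 */
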
532static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
533{
534 struct genl_ops *ops;
535 struct genl_family *family;
536 struct net *net = sock_net(skb->sk);
537 struct genl_info info;
538 struct genlmsghdr *hdr = nlmsg_data(nlh);
539 int hdrlen, err;
540
541 family = genl_family_find_byid(nlh->nlmsg_type);
542 if (family == NULL)
543 return -ENOENT;
544
545 /* this family doesn't exist in this netns */
546 if (!family->netnsok && !net_eq(net, &init_net))
547 return -ENOENT;
548
549 hdrlen = GENL_HDRLEN + family->hdrsize;
550 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
551 return -EINVAL;
552
553 ops = genl_get_cmd(hdr->cmd, family);
554 if (ops == NULL)
555 return -EOPNOTSUPP;
556
557 if ((ops->flags & GENL_ADMIN_PERM) &&
558 !capable(CAP_NET_ADMIN))
559 return -EPERM;
560
561 if (nlh->nlmsg_flags & NLM_F_DUMP) {
562 if (ops->dumpit == NULL)
563 return -EOPNOTSUPP;
564
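		/* The netlink cb_mutex for NETLINK_GENERIC sockets is
		 * genl_mutex itself (see genl_pernet_init()), so it must be
		 * dropped around netlink_dump_start() to avoid a
		 * self-deadlock. */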
565 genl_unlock();
566 {
567 struct netlink_dump_control c = {
568 .dump = ops->dumpit,
569 .done = ops->done,
570 };
571 err = netlink_dump_start(net->genl_sock, skb, nlh, &c);
572 }
573 genl_lock();
574 return err;
575 }
576
577 if (ops->doit == NULL)
578 return -EOPNOTSUPP;
579
580 if (family->attrbuf) {
581 err = nlmsg_parse(nlh, hdrlen, family->attrbuf, family->maxattr,
582 ops->policy);
583 if (err < 0)
584 return err;
585 }
586
587 info.snd_seq = nlh->nlmsg_seq;
588 info.snd_pid = NETLINK_CB(skb).pid;
589 info.nlhdr = nlh;
590 info.genlhdr = nlmsg_data(nlh);
591 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
592 info.attrs = family->attrbuf;
593 genl_info_net_set(&info, net);
594 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
595
596 if (family->pre_doit) {
597 err = family->pre_doit(ops, skb, &info);
598 if (err)
599 return err;
600 }
601
602 err = ops->doit(skb, &info);
603
604 if (family->post_doit)
605 family->post_doit(ops, skb, &info);
606
607 return err;
608}
609
610static void genl_rcv(struct sk_buff *skb)
611{
612 genl_lock();
613 netlink_rcv_skb(skb, &genl_rcv_msg);
614 genl_unlock();
615}
616
617/**************************************************************************
618 * Controller
619 **************************************************************************/
620
621static struct genl_family genl_ctrl = {
622 .id = GENL_ID_CTRL,
623 .name = "nlctrl",
624 .version = 0x2,
625 .maxattr = CTRL_ATTR_MAX,
626 .netnsok = true,
627};
628
629static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
630 u32 flags, struct sk_buff *skb, u8 cmd)
631{
632 void *hdr;
633
634 hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
635 if (hdr == NULL)
636 return -1;
637
638 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
639 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
640 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
641 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
642 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
643 goto nla_put_failure;
644
645 if (!list_empty(&family->ops_list)) {
646 struct nlattr *nla_ops;
647 struct genl_ops *ops;
648 int idx = 1;
649
650 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
651 if (nla_ops == NULL)
652 goto nla_put_failure;
653
654 list_for_each_entry(ops, &family->ops_list, ops_list) {
655 struct nlattr *nest;
656
657 nest = nla_nest_start(skb, idx++);
658 if (nest == NULL)
659 goto nla_put_failure;
660
661 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
662 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
663 goto nla_put_failure;
664
665 nla_nest_end(skb, nest);
666 }
667
668 nla_nest_end(skb, nla_ops);
669 }
670
671 if (!list_empty(&family->mcast_groups)) {
672 struct genl_multicast_group *grp;
673 struct nlattr *nla_grps;
674 int idx = 1;
675
676 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
677 if (nla_grps == NULL)
678 goto nla_put_failure;
679
680 list_for_each_entry(grp, &family->mcast_groups, list) {
681 struct nlattr *nest;
682
683 nest = nla_nest_start(skb, idx++);
684 if (nest == NULL)
685 goto nla_put_failure;
686
687 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
688 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
689 grp->name))
690 goto nla_put_failure;
691
692 nla_nest_end(skb, nest);
693 }
694 nla_nest_end(skb, nla_grps);
695 }
696
697 return genlmsg_end(skb, hdr);
698
699nla_put_failure:
700 genlmsg_cancel(skb, hdr);
701 return -EMSGSIZE;
702}
703
704static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
705 u32 seq, u32 flags, struct sk_buff *skb,
706 u8 cmd)
707{
708 void *hdr;
709 struct nlattr *nla_grps;
710 struct nlattr *nest;
711
712 hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
713 if (hdr == NULL)
714 return -1;
715
716 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
717 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
718 goto nla_put_failure;
719
720 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
721 if (nla_grps == NULL)
722 goto nla_put_failure;
723
724 nest = nla_nest_start(skb, 1);
725 if (nest == NULL)
726 goto nla_put_failure;
727
728 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
729 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
730 grp->name))
731 goto nla_put_failure;
732
733 nla_nest_end(skb, nest);
734 nla_nest_end(skb, nla_grps);
735
736 return genlmsg_end(skb, hdr);
737
738nla_put_failure:
739 genlmsg_cancel(skb, hdr);
740 return -EMSGSIZE;
741}
742
743static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
744{
745
746 int i, n = 0;
747 struct genl_family *rt;
748 struct net *net = sock_net(skb->sk);
749 int chains_to_skip = cb->args[0];
750 int fams_to_skip = cb->args[1];
751
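	/* cb->args[0] remembers the family hash chain and cb->args[1] the
	 * position within it, so a dump that fills one skb resumes from
	 * roughly the same spot on the next call. */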
752 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
753 n = 0;
754 list_for_each_entry(rt, genl_family_chain(i), family_list) {
755 if (!rt->netnsok && !net_eq(net, &init_net))
756 continue;
757 if (++n < fams_to_skip)
758 continue;
759 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
760 cb->nlh->nlmsg_seq, NLM_F_MULTI,
761 skb, CTRL_CMD_NEWFAMILY) < 0)
762 goto errout;
763 }
764
765 fams_to_skip = 0;
766 }
767
768errout:
769 cb->args[0] = i;
770 cb->args[1] = n;
771
772 return skb->len;
773}
774
775static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
776 u32 pid, int seq, u8 cmd)
777{
778 struct sk_buff *skb;
779 int err;
780
781 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
782 if (skb == NULL)
783 return ERR_PTR(-ENOBUFS);
784
785 err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
786 if (err < 0) {
787 nlmsg_free(skb);
788 return ERR_PTR(err);
789 }
790
791 return skb;
792}
793
794static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
795 u32 pid, int seq, u8 cmd)
796{
797 struct sk_buff *skb;
798 int err;
799
800 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
801 if (skb == NULL)
802 return ERR_PTR(-ENOBUFS);
803
804 err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
805 if (err < 0) {
806 nlmsg_free(skb);
807 return ERR_PTR(err);
808 }
809
810 return skb;
811}
812
813static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
814 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
815 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
816 .len = GENL_NAMSIZ - 1 },
817};
818
819static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
820{
821 struct sk_buff *msg;
822 struct genl_family *res = NULL;
823 int err = -EINVAL;
824
825 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
826 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
827 res = genl_family_find_byid(id);
828 err = -ENOENT;
829 }
830
831 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
832 char *name;
833
834 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
835 res = genl_family_find_byname(name);
836#ifdef CONFIG_MODULES
837 if (res == NULL) {
838 genl_unlock();
839 request_module("net-pf-%d-proto-%d-family-%s",
840 PF_NETLINK, NETLINK_GENERIC, name);
841 genl_lock();
842 res = genl_family_find_byname(name);
843 }
844#endif
845 err = -ENOENT;
846 }
847
848 if (res == NULL)
849 return err;
850
851 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
852 /* family doesn't exist here */
853 return -ENOENT;
854 }
855
856 msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
857 CTRL_CMD_NEWFAMILY);
858 if (IS_ERR(msg))
859 return PTR_ERR(msg);
860
861 return genlmsg_reply(msg, info);
862}
863
864static int genl_ctrl_event(int event, void *data)
865{
866 struct sk_buff *msg;
867 struct genl_family *family;
868 struct genl_multicast_group *grp;
869
870 /* genl is still initialising */
871 if (!init_net.genl_sock)
872 return 0;
873
874 switch (event) {
875 case CTRL_CMD_NEWFAMILY:
876 case CTRL_CMD_DELFAMILY:
877 family = data;
878 msg = ctrl_build_family_msg(family, 0, 0, event);
879 break;
880 case CTRL_CMD_NEWMCAST_GRP:
881 case CTRL_CMD_DELMCAST_GRP:
882 grp = data;
883 family = grp->family;
884 msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
885 break;
886 default:
887 return -EINVAL;
888 }
889
890 if (IS_ERR(msg))
891 return PTR_ERR(msg);
892
893 if (!family->netnsok) {
894 genlmsg_multicast_netns(&init_net, msg, 0,
895 GENL_ID_CTRL, GFP_KERNEL);
896 } else {
897 rcu_read_lock();
898 genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC);
899 rcu_read_unlock();
900 }
901
902 return 0;
903}
904
905static struct genl_ops genl_ctrl_ops = {
906 .cmd = CTRL_CMD_GETFAMILY,
907 .doit = ctrl_getfamily,
908 .dumpit = ctrl_dumpfamily,
909 .policy = ctrl_policy,
910};
911
912static struct genl_multicast_group notify_grp = {
913 .name = "notify",
914};
915
916static int __net_init genl_pernet_init(struct net *net)
917{
918 /* we'll bump the group number right afterwards */
919 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
920 genl_rcv, &genl_mutex,
921 THIS_MODULE);
922
923 if (!net->genl_sock && net_eq(net, &init_net))
924 panic("GENL: Cannot initialize generic netlink\n");
925
926 if (!net->genl_sock)
927 return -ENOMEM;
928
929 return 0;
930}
931
932static void __net_exit genl_pernet_exit(struct net *net)
933{
934 netlink_kernel_release(net->genl_sock);
935 net->genl_sock = NULL;
936}
937
938static struct pernet_operations genl_pernet_ops = {
939 .init = genl_pernet_init,
940 .exit = genl_pernet_exit,
941};
942
943static int __init genl_init(void)
944{
945 int i, err;
946
947 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
948 INIT_LIST_HEAD(&family_ht[i]);
949
950 err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1);
951 if (err < 0)
952 goto problem;
953
954 netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
955
956 err = register_pernet_subsys(&genl_pernet_ops);
957 if (err)
958 goto problem;
959
960 err = genl_register_mc_group(&genl_ctrl, &notify_grp);
961 if (err < 0)
962 goto problem;
963
964 return 0;
965
966problem:
967 panic("GENL: Cannot register controller: %d\n", err);
968}
969
970subsys_initcall(genl_init);
971
972static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
973 gfp_t flags)
974{
975 struct sk_buff *tmp;
976 struct net *net, *prev = NULL;
977 int err;
978
979 for_each_net_rcu(net) {
980 if (prev) {
981 tmp = skb_clone(skb, flags);
982 if (!tmp) {
983 err = -ENOMEM;
984 goto error;
985 }
986 err = nlmsg_multicast(prev->genl_sock, tmp,
987 pid, group, flags);
988 if (err)
989 goto error;
990 }
991
992 prev = net;
993 }
994
995 return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
996 error:
997 kfree_skb(skb);
998 return err;
999}
1000
1001int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
1002 gfp_t flags)
1003{
1004 return genlmsg_mcast(skb, pid, group, flags);
1005}
1006EXPORT_SYMBOL(genlmsg_multicast_allns);
1007
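/*
 * Usage sketch (illustrative only; foo_event_grp is the hypothetical group
 * registered via genl_register_mc_group() above): broadcasting an event
 * from a netnsok family to listeners in every namespace.
 *
 *	genlmsg_multicast_allns(msg, 0, foo_event_grp.id, GFP_KERNEL);
 *
 * Families that only exist in &init_net would use
 * genlmsg_multicast_netns(&init_net, ...) instead, as the controller does.
 */
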
1008void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
1009 struct nlmsghdr *nlh, gfp_t flags)
1010{
1011 struct sock *sk = net->genl_sock;
1012 int report = 0;
1013
1014 if (nlh)
1015 report = nlmsg_report(nlh);
1016
1017 nlmsg_notify(sk, skb, pid, group, report, flags);
1018}
1019EXPORT_SYMBOL(genl_notify);
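
/*
 * Usage sketch (illustrative only; foo_event_grp is hypothetical): sending
 * a notification that also honours NLM_F_ECHO on the request which
 * triggered it.
 *
 *	genl_notify(msg, genl_info_net(info), info->snd_pid,
 *		    foo_event_grp.id, info->nlhdr, GFP_KERNEL);
 */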