1/*
2 * NETLINK Generic Netlink Family
3 *
4 * Authors: Jamal Hadi Salim
5 * Thomas Graf <tgraf@suug.ch>
6 * Johannes Berg <johannes@sipsolutions.net>
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/errno.h>
13#include <linux/types.h>
14#include <linux/socket.h>
15#include <linux/string.h>
16#include <linux/skbuff.h>
17#include <linux/mutex.h>
18#include <linux/bitmap.h>
19#include <linux/rwsem.h>
20#include <net/sock.h>
21#include <net/genetlink.h>
22
23static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
24static DECLARE_RWSEM(cb_lock);
25
26atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
27DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
28
29void genl_lock(void)
30{
31 mutex_lock(&genl_mutex);
32}
33EXPORT_SYMBOL(genl_lock);
34
35void genl_unlock(void)
36{
37 mutex_unlock(&genl_mutex);
38}
39EXPORT_SYMBOL(genl_unlock);
40
41#ifdef CONFIG_LOCKDEP
42bool lockdep_genl_is_held(void)
43{
44 return lockdep_is_held(&genl_mutex);
45}
46EXPORT_SYMBOL(lockdep_genl_is_held);
47#endif
48
49static void genl_lock_all(void)
50{
51 down_write(&cb_lock);
52 genl_lock();
53}
54
55static void genl_unlock_all(void)
56{
57 genl_unlock();
58 up_write(&cb_lock);
59}
60
61#define GENL_FAM_TAB_SIZE 16
62#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
63
64static struct list_head family_ht[GENL_FAM_TAB_SIZE];
65/*
66 * Bitmap of multicast groups that are currently in use.
67 *
68 * To avoid an allocation at boot of just one unsigned long,
69 * declare it global instead.
70 * Bit 0 is marked as already used since group 0 is invalid.
71 * Bit 1 is marked as already used since the drop-monitor code
72 * abuses the API and thinks it can statically use group 1.
73 * That group will typically conflict with other groups that
74 * any proper users use.
75 * Bit 16 is marked as used since it's used for generic netlink
76 * and the code no longer marks pre-reserved IDs as used.
77 * Bit 17 is marked as already used since the VFS quota code
78 * also abused this API and relied on family == group ID, we
79 * cater to that by giving it a static family and group ID.
80 * Bit 18 is marked as already used since the PMCRAID driver
81 * did the same thing as the VFS quota code (maybe copied?)
82 */
83static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
84 BIT(GENL_ID_VFS_DQUOT) |
85 BIT(GENL_ID_PMCRAID);
86static unsigned long *mc_groups = &mc_group_start;
87static unsigned long mc_groups_longs = 1;
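/*
 * Illustrative note, not part of the original file: assuming the usual
 * GENL_ID_CTRL/GENL_ID_VFS_DQUOT/GENL_ID_PMCRAID values of 16, 17 and 18,
 * the bitmap above starts out as
 *
 *	mc_group_start = 0x3 | BIT(16) | BIT(17) | BIT(18) = 0x00070003
 *
 * i.e. groups 0, 1, 16, 17 and 18 are pre-reserved, and the first group ID
 * handed out by genl_allocate_reserve_groups() below is 2.
 */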
88
89static int genl_ctrl_event(int event, struct genl_family *family,
90 const struct genl_multicast_group *grp,
91 int grp_id);
92
93static inline unsigned int genl_family_hash(unsigned int id)
94{
95 return id & GENL_FAM_TAB_MASK;
96}
97
98static inline struct list_head *genl_family_chain(unsigned int id)
99{
100 return &family_ht[genl_family_hash(id)];
101}
102
103static struct genl_family *genl_family_find_byid(unsigned int id)
104{
105 struct genl_family *f;
106
107 list_for_each_entry(f, genl_family_chain(id), family_list)
108 if (f->id == id)
109 return f;
110
111 return NULL;
112}
113
114static struct genl_family *genl_family_find_byname(char *name)
115{
116 struct genl_family *f;
117 int i;
118
119 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
120 list_for_each_entry(f, genl_family_chain(i), family_list)
121 if (strcmp(f->name, name) == 0)
122 return f;
123
124 return NULL;
125}
126
127static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
128{
129 int i;
130
131 for (i = 0; i < family->n_ops; i++)
132 if (family->ops[i].cmd == cmd)
133 return &family->ops[i];
134
135 return NULL;
136}
137
138/* Of course we are going to have problems once we hit
139 * 2^16 alive types, but that can only happen by year 2K
140*/
141static u16 genl_generate_id(void)
142{
143 static u16 id_gen_idx = GENL_MIN_ID;
144 int i;
145
146 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
147 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
148 id_gen_idx != GENL_ID_PMCRAID &&
149 !genl_family_find_byid(id_gen_idx))
150 return id_gen_idx;
151 if (++id_gen_idx > GENL_MAX_ID)
152 id_gen_idx = GENL_MIN_ID;
153 }
154
155 return 0;
156}
157
158static int genl_allocate_reserve_groups(int n_groups, int *first_id)
159{
160 unsigned long *new_groups;
161 int start = 0;
162 int i;
163 int id;
164 bool fits;
165
166 do {
167 if (start == 0)
168 id = find_first_zero_bit(mc_groups,
169 mc_groups_longs *
170 BITS_PER_LONG);
171 else
172 id = find_next_zero_bit(mc_groups,
173 mc_groups_longs * BITS_PER_LONG,
174 start);
175
176 fits = true;
177 for (i = id;
178 i < min_t(int, id + n_groups,
179 mc_groups_longs * BITS_PER_LONG);
180 i++) {
181 if (test_bit(i, mc_groups)) {
182 start = i;
183 fits = false;
184 break;
185 }
186 }
187
188 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
189 unsigned long new_longs = mc_groups_longs +
190 BITS_TO_LONGS(n_groups);
191 size_t nlen = new_longs * sizeof(unsigned long);
192
193 if (mc_groups == &mc_group_start) {
194 new_groups = kzalloc(nlen, GFP_KERNEL);
195 if (!new_groups)
196 return -ENOMEM;
197 mc_groups = new_groups;
198 *mc_groups = mc_group_start;
199 } else {
200 new_groups = krealloc(mc_groups, nlen,
201 GFP_KERNEL);
202 if (!new_groups)
203 return -ENOMEM;
204 mc_groups = new_groups;
205 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
206 mc_groups[mc_groups_longs + i] = 0;
207 }
208 mc_groups_longs = new_longs;
209 }
210 } while (!fits);
211
212 for (i = id; i < id + n_groups; i++)
213 set_bit(i, mc_groups);
214 *first_id = id;
215 return 0;
216}
217
218static struct genl_family genl_ctrl;
219
220static int genl_validate_assign_mc_groups(struct genl_family *family)
221{
222 int first_id;
223 int n_groups = family->n_mcgrps;
224 int err = 0, i;
225 bool groups_allocated = false;
226
227 if (!n_groups)
228 return 0;
229
230 for (i = 0; i < n_groups; i++) {
231 const struct genl_multicast_group *grp = &family->mcgrps[i];
232
233 if (WARN_ON(grp->name[0] == '\0'))
234 return -EINVAL;
235 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
236 return -EINVAL;
237 }
238
239 /* special-case our own group and hacks */
240 if (family == &genl_ctrl) {
241 first_id = GENL_ID_CTRL;
242 BUG_ON(n_groups != 1);
243 } else if (strcmp(family->name, "NET_DM") == 0) {
244 first_id = 1;
245 BUG_ON(n_groups != 1);
246 } else if (family->id == GENL_ID_VFS_DQUOT) {
247 first_id = GENL_ID_VFS_DQUOT;
248 BUG_ON(n_groups != 1);
249 } else if (family->id == GENL_ID_PMCRAID) {
250 first_id = GENL_ID_PMCRAID;
251 BUG_ON(n_groups != 1);
252 } else {
253 groups_allocated = true;
254 err = genl_allocate_reserve_groups(n_groups, &first_id);
255 if (err)
256 return err;
257 }
258
259 family->mcgrp_offset = first_id;
260
 261	/* if still initializing, can't and don't need to realloc bitmaps */
262 if (!init_net.genl_sock)
263 return 0;
264
265 if (family->netnsok) {
266 struct net *net;
267
268 netlink_table_grab();
269 rcu_read_lock();
270 for_each_net_rcu(net) {
271 err = __netlink_change_ngroups(net->genl_sock,
272 mc_groups_longs * BITS_PER_LONG);
273 if (err) {
274 /*
275 * No need to roll back, can only fail if
276 * memory allocation fails and then the
277 * number of _possible_ groups has been
278 * increased on some sockets which is ok.
279 */
280 break;
281 }
282 }
283 rcu_read_unlock();
284 netlink_table_ungrab();
285 } else {
286 err = netlink_change_ngroups(init_net.genl_sock,
287 mc_groups_longs * BITS_PER_LONG);
288 }
289
290 if (groups_allocated && err) {
291 for (i = 0; i < family->n_mcgrps; i++)
292 clear_bit(family->mcgrp_offset + i, mc_groups);
293 }
294
295 return err;
296}
297
298static void genl_unregister_mc_groups(struct genl_family *family)
299{
300 struct net *net;
301 int i;
302
303 netlink_table_grab();
304 rcu_read_lock();
305 for_each_net_rcu(net) {
306 for (i = 0; i < family->n_mcgrps; i++)
307 __netlink_clear_multicast_users(
308 net->genl_sock, family->mcgrp_offset + i);
309 }
310 rcu_read_unlock();
311 netlink_table_ungrab();
312
313 for (i = 0; i < family->n_mcgrps; i++) {
314 int grp_id = family->mcgrp_offset + i;
315
316 if (grp_id != 1)
317 clear_bit(grp_id, mc_groups);
318 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
319 &family->mcgrps[i], grp_id);
320 }
321}
322
323static int genl_validate_ops(const struct genl_family *family)
324{
325 const struct genl_ops *ops = family->ops;
326 unsigned int n_ops = family->n_ops;
327 int i, j;
328
329 if (WARN_ON(n_ops && !ops))
330 return -EINVAL;
331
332 if (!n_ops)
333 return 0;
334
335 for (i = 0; i < n_ops; i++) {
336 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
337 return -EINVAL;
338 for (j = i + 1; j < n_ops; j++)
339 if (ops[i].cmd == ops[j].cmd)
340 return -EINVAL;
341 }
342
343 return 0;
344}
345
346/**
347 * __genl_register_family - register a generic netlink family
348 * @family: generic netlink family
349 *
350 * Registers the specified family after validating it first. Only one
351 * family may be registered with the same family name or identifier.
 352 * The family id may equal GENL_ID_GENERATE causing a unique id to
353 * be automatically generated and assigned.
354 *
355 * The family's ops array must already be assigned, you can use the
356 * genl_register_family_with_ops() helper function.
357 *
358 * Return 0 on success or a negative error code.
359 */
360int __genl_register_family(struct genl_family *family)
361{
362 int err = -EINVAL, i;
363
364 if (family->id && family->id < GENL_MIN_ID)
365 goto errout;
366
367 if (family->id > GENL_MAX_ID)
368 goto errout;
369
370 err = genl_validate_ops(family);
371 if (err)
372 return err;
373
374 genl_lock_all();
375
376 if (genl_family_find_byname(family->name)) {
377 err = -EEXIST;
378 goto errout_locked;
379 }
380
381 if (family->id == GENL_ID_GENERATE) {
382 u16 newid = genl_generate_id();
383
384 if (!newid) {
385 err = -ENOMEM;
386 goto errout_locked;
387 }
388
389 family->id = newid;
390 } else if (genl_family_find_byid(family->id)) {
391 err = -EEXIST;
392 goto errout_locked;
393 }
394
395 if (family->maxattr && !family->parallel_ops) {
396 family->attrbuf = kmalloc((family->maxattr+1) *
397 sizeof(struct nlattr *), GFP_KERNEL);
398 if (family->attrbuf == NULL) {
399 err = -ENOMEM;
400 goto errout_locked;
401 }
402 } else
403 family->attrbuf = NULL;
404
405 err = genl_validate_assign_mc_groups(family);
406 if (err)
407 goto errout_locked;
408
409 list_add_tail(&family->family_list, genl_family_chain(family->id));
410 genl_unlock_all();
411
412 /* send all events */
413 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
414 for (i = 0; i < family->n_mcgrps; i++)
415 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
416 &family->mcgrps[i], family->mcgrp_offset + i);
417
418 return 0;
419
420errout_locked:
421 genl_unlock_all();
422errout:
423 return err;
424}
425EXPORT_SYMBOL(__genl_register_family);
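/*
 * Illustrative usage sketch, not part of the original file: a typical
 * caller defines its family with GENL_ID_GENERATE and registers it via the
 * genl_register_family_with_ops() wrapper mentioned in the comment above.
 * The "foo"/FOO_* names are placeholders, not a real family:
 *
 *	static const struct genl_ops foo_ops[] = {
 *		{ .cmd = FOO_CMD_GET, .doit = foo_get, .policy = foo_policy },
 *	};
 *
 *	static struct genl_family foo_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.name		= "foo",
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *	};
 *
 *	err = genl_register_family_with_ops(&foo_family, foo_ops);
 */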
426
427/**
428 * genl_unregister_family - unregister generic netlink family
429 * @family: generic netlink family
430 *
431 * Unregisters the specified family.
432 *
433 * Returns 0 on success or a negative error code.
434 */
435int genl_unregister_family(struct genl_family *family)
436{
437 struct genl_family *rc;
438
439 genl_lock_all();
440
441 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
442 if (family->id != rc->id || strcmp(rc->name, family->name))
443 continue;
444
445 genl_unregister_mc_groups(family);
446
447 list_del(&rc->family_list);
448 family->n_ops = 0;
449 up_write(&cb_lock);
450 wait_event(genl_sk_destructing_waitq,
451 atomic_read(&genl_sk_destructing_cnt) == 0);
452 genl_unlock();
453
454 kfree(family->attrbuf);
455 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
456 return 0;
457 }
458
459 genl_unlock_all();
460
461 return -ENOENT;
462}
463EXPORT_SYMBOL(genl_unregister_family);
464
465/**
466 * genlmsg_put - Add generic netlink header to netlink message
467 * @skb: socket buffer holding the message
468 * @portid: netlink portid the message is addressed to
469 * @seq: sequence number (usually the one of the sender)
470 * @family: generic netlink family
471 * @flags: netlink message flags
472 * @cmd: generic netlink command
473 *
474 * Returns pointer to user specific header
475 */
476void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
477 struct genl_family *family, int flags, u8 cmd)
478{
479 struct nlmsghdr *nlh;
480 struct genlmsghdr *hdr;
481
482 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
483 family->hdrsize, flags);
484 if (nlh == NULL)
485 return NULL;
486
487 hdr = nlmsg_data(nlh);
488 hdr->cmd = cmd;
489 hdr->version = family->version;
490 hdr->reserved = 0;
491
492 return (char *) hdr + GENL_HDRLEN;
493}
494EXPORT_SYMBOL(genlmsg_put);
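/*
 * Illustrative usage sketch, not part of the original file: a doit handler
 * usually wraps genlmsg_put() like this (foo_family and the FOO_* names are
 * placeholders; the same pattern is used by ctrl_build_family_msg() further
 * down):
 *
 *	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 *			  &foo_family, 0, FOO_CMD_REPLY);
 *	if (!hdr)
 *		goto err_free;
 *	if (nla_put_u32(skb, FOO_ATTR_VALUE, val))
 *		goto err_free;
 *	genlmsg_end(skb, hdr);
 *	return genlmsg_reply(skb, info);
 *
 * err_free:
 *	nlmsg_free(skb);
 *	return -EMSGSIZE;
 */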
495
496static int genl_lock_start(struct netlink_callback *cb)
497{
498 /* our ops are always const - netlink API doesn't propagate that */
499 const struct genl_ops *ops = cb->data;
500 int rc = 0;
501
502 if (ops->start) {
503 genl_lock();
504 rc = ops->start(cb);
505 genl_unlock();
506 }
507 return rc;
508}
509
510static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
511{
512 /* our ops are always const - netlink API doesn't propagate that */
513 const struct genl_ops *ops = cb->data;
514 int rc;
515
516 genl_lock();
517 rc = ops->dumpit(skb, cb);
518 genl_unlock();
519 return rc;
520}
521
522static int genl_lock_done(struct netlink_callback *cb)
523{
524 /* our ops are always const - netlink API doesn't propagate that */
525 const struct genl_ops *ops = cb->data;
526 int rc = 0;
527
528 if (ops->done) {
529 genl_lock();
530 rc = ops->done(cb);
531 genl_unlock();
532 }
533 return rc;
534}
535
536static int genl_family_rcv_msg(struct genl_family *family,
537 struct sk_buff *skb,
538 struct nlmsghdr *nlh)
539{
540 const struct genl_ops *ops;
541 struct net *net = sock_net(skb->sk);
542 struct genl_info info;
543 struct genlmsghdr *hdr = nlmsg_data(nlh);
544 struct nlattr **attrbuf;
545 int hdrlen, err;
546
547 /* this family doesn't exist in this netns */
548 if (!family->netnsok && !net_eq(net, &init_net))
549 return -ENOENT;
550
551 hdrlen = GENL_HDRLEN + family->hdrsize;
552 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
553 return -EINVAL;
554
555 ops = genl_get_cmd(hdr->cmd, family);
556 if (ops == NULL)
557 return -EOPNOTSUPP;
558
559 if ((ops->flags & GENL_ADMIN_PERM) &&
560 !netlink_capable(skb, CAP_NET_ADMIN))
561 return -EPERM;
562
563 if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
564 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
565 return -EPERM;
566
567 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
568 int rc;
569
570 if (ops->dumpit == NULL)
571 return -EOPNOTSUPP;
572
573 if (!family->parallel_ops) {
574 struct netlink_dump_control c = {
575 .module = family->module,
576 /* we have const, but the netlink API doesn't */
577 .data = (void *)ops,
578 .start = genl_lock_start,
579 .dump = genl_lock_dumpit,
580 .done = genl_lock_done,
581 };
582
583 genl_unlock();
584 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
585 genl_lock();
586
587 } else {
588 struct netlink_dump_control c = {
589 .module = family->module,
590 .start = ops->start,
591 .dump = ops->dumpit,
592 .done = ops->done,
593 };
594
595 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
596 }
597
598 return rc;
599 }
600
601 if (ops->doit == NULL)
602 return -EOPNOTSUPP;
603
604 if (family->maxattr && family->parallel_ops) {
605 attrbuf = kmalloc((family->maxattr+1) *
606 sizeof(struct nlattr *), GFP_KERNEL);
607 if (attrbuf == NULL)
608 return -ENOMEM;
609 } else
610 attrbuf = family->attrbuf;
611
612 if (attrbuf) {
613 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
614 ops->policy);
615 if (err < 0)
616 goto out;
617 }
618
619 info.snd_seq = nlh->nlmsg_seq;
620 info.snd_portid = NETLINK_CB(skb).portid;
621 info.nlhdr = nlh;
622 info.genlhdr = nlmsg_data(nlh);
623 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
624 info.attrs = attrbuf;
625 genl_info_net_set(&info, net);
626 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
627
628 if (family->pre_doit) {
629 err = family->pre_doit(ops, skb, &info);
630 if (err)
631 goto out;
632 }
633
634 err = ops->doit(skb, &info);
635
636 if (family->post_doit)
637 family->post_doit(ops, skb, &info);
638
639out:
640 if (family->parallel_ops)
641 kfree(attrbuf);
642
643 return err;
644}
645
646static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
647{
648 struct genl_family *family;
649 int err;
650
651 family = genl_family_find_byid(nlh->nlmsg_type);
652 if (family == NULL)
653 return -ENOENT;
654
655 if (!family->parallel_ops)
656 genl_lock();
657
658 err = genl_family_rcv_msg(family, skb, nlh);
659
660 if (!family->parallel_ops)
661 genl_unlock();
662
663 return err;
664}
665
666static void genl_rcv(struct sk_buff *skb)
667{
668 down_read(&cb_lock);
669 netlink_rcv_skb(skb, &genl_rcv_msg);
670 up_read(&cb_lock);
671}
672
673/**************************************************************************
674 * Controller
675 **************************************************************************/
676
677static struct genl_family genl_ctrl = {
678 .id = GENL_ID_CTRL,
679 .name = "nlctrl",
680 .version = 0x2,
681 .maxattr = CTRL_ATTR_MAX,
682 .netnsok = true,
683};
684
685static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
686 u32 flags, struct sk_buff *skb, u8 cmd)
687{
688 void *hdr;
689
690 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
691 if (hdr == NULL)
692 return -1;
693
694 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
695 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
696 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
697 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
698 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
699 goto nla_put_failure;
700
701 if (family->n_ops) {
702 struct nlattr *nla_ops;
703 int i;
704
705 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
706 if (nla_ops == NULL)
707 goto nla_put_failure;
708
709 for (i = 0; i < family->n_ops; i++) {
710 struct nlattr *nest;
711 const struct genl_ops *ops = &family->ops[i];
712 u32 op_flags = ops->flags;
713
714 if (ops->dumpit)
715 op_flags |= GENL_CMD_CAP_DUMP;
716 if (ops->doit)
717 op_flags |= GENL_CMD_CAP_DO;
718 if (ops->policy)
719 op_flags |= GENL_CMD_CAP_HASPOL;
720
721 nest = nla_nest_start(skb, i + 1);
722 if (nest == NULL)
723 goto nla_put_failure;
724
725 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
726 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
727 goto nla_put_failure;
728
729 nla_nest_end(skb, nest);
730 }
731
732 nla_nest_end(skb, nla_ops);
733 }
734
735 if (family->n_mcgrps) {
736 struct nlattr *nla_grps;
737 int i;
738
739 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
740 if (nla_grps == NULL)
741 goto nla_put_failure;
742
743 for (i = 0; i < family->n_mcgrps; i++) {
744 struct nlattr *nest;
745 const struct genl_multicast_group *grp;
746
747 grp = &family->mcgrps[i];
748
749 nest = nla_nest_start(skb, i + 1);
750 if (nest == NULL)
751 goto nla_put_failure;
752
753 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
754 family->mcgrp_offset + i) ||
755 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
756 grp->name))
757 goto nla_put_failure;
758
759 nla_nest_end(skb, nest);
760 }
761 nla_nest_end(skb, nla_grps);
762 }
763
764 genlmsg_end(skb, hdr);
765 return 0;
766
767nla_put_failure:
768 genlmsg_cancel(skb, hdr);
769 return -EMSGSIZE;
770}
771
772static int ctrl_fill_mcgrp_info(struct genl_family *family,
773 const struct genl_multicast_group *grp,
774 int grp_id, u32 portid, u32 seq, u32 flags,
775 struct sk_buff *skb, u8 cmd)
776{
777 void *hdr;
778 struct nlattr *nla_grps;
779 struct nlattr *nest;
780
781 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
782 if (hdr == NULL)
783 return -1;
784
785 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
786 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
787 goto nla_put_failure;
788
789 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
790 if (nla_grps == NULL)
791 goto nla_put_failure;
792
793 nest = nla_nest_start(skb, 1);
794 if (nest == NULL)
795 goto nla_put_failure;
796
797 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
798 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
799 grp->name))
800 goto nla_put_failure;
801
802 nla_nest_end(skb, nest);
803 nla_nest_end(skb, nla_grps);
804
805 genlmsg_end(skb, hdr);
806 return 0;
807
808nla_put_failure:
809 genlmsg_cancel(skb, hdr);
810 return -EMSGSIZE;
811}
812
813static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
814{
815
816 int i, n = 0;
817 struct genl_family *rt;
818 struct net *net = sock_net(skb->sk);
819 int chains_to_skip = cb->args[0];
820 int fams_to_skip = cb->args[1];
821
822 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
823 n = 0;
824 list_for_each_entry(rt, genl_family_chain(i), family_list) {
825 if (!rt->netnsok && !net_eq(net, &init_net))
826 continue;
827 if (++n < fams_to_skip)
828 continue;
829 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
830 cb->nlh->nlmsg_seq, NLM_F_MULTI,
831 skb, CTRL_CMD_NEWFAMILY) < 0)
832 goto errout;
833 }
834
835 fams_to_skip = 0;
836 }
837
838errout:
839 cb->args[0] = i;
840 cb->args[1] = n;
841
842 return skb->len;
843}
844
845static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
846 u32 portid, int seq, u8 cmd)
847{
848 struct sk_buff *skb;
849 int err;
850
851 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
852 if (skb == NULL)
853 return ERR_PTR(-ENOBUFS);
854
855 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
856 if (err < 0) {
857 nlmsg_free(skb);
858 return ERR_PTR(err);
859 }
860
861 return skb;
862}
863
864static struct sk_buff *
865ctrl_build_mcgrp_msg(struct genl_family *family,
866 const struct genl_multicast_group *grp,
867 int grp_id, u32 portid, int seq, u8 cmd)
868{
869 struct sk_buff *skb;
870 int err;
871
872 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
873 if (skb == NULL)
874 return ERR_PTR(-ENOBUFS);
875
876 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
877 seq, 0, skb, cmd);
878 if (err < 0) {
879 nlmsg_free(skb);
880 return ERR_PTR(err);
881 }
882
883 return skb;
884}
885
886static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
887 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
888 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
889 .len = GENL_NAMSIZ - 1 },
890};
891
892static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
893{
894 struct sk_buff *msg;
895 struct genl_family *res = NULL;
896 int err = -EINVAL;
897
898 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
899 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
900 res = genl_family_find_byid(id);
901 err = -ENOENT;
902 }
903
904 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
905 char *name;
906
907 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
908 res = genl_family_find_byname(name);
909#ifdef CONFIG_MODULES
910 if (res == NULL) {
911 genl_unlock();
912 up_read(&cb_lock);
913 request_module("net-pf-%d-proto-%d-family-%s",
914 PF_NETLINK, NETLINK_GENERIC, name);
915 down_read(&cb_lock);
916 genl_lock();
917 res = genl_family_find_byname(name);
918 }
919#endif
920 err = -ENOENT;
921 }
922
923 if (res == NULL)
924 return err;
925
926 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
927 /* family doesn't exist here */
928 return -ENOENT;
929 }
930
931 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
932 CTRL_CMD_NEWFAMILY);
933 if (IS_ERR(msg))
934 return PTR_ERR(msg);
935
936 return genlmsg_reply(msg, info);
937}
938
939static int genl_ctrl_event(int event, struct genl_family *family,
940 const struct genl_multicast_group *grp,
941 int grp_id)
942{
943 struct sk_buff *msg;
944
945 /* genl is still initialising */
946 if (!init_net.genl_sock)
947 return 0;
948
949 switch (event) {
950 case CTRL_CMD_NEWFAMILY:
951 case CTRL_CMD_DELFAMILY:
952 WARN_ON(grp);
953 msg = ctrl_build_family_msg(family, 0, 0, event);
954 break;
955 case CTRL_CMD_NEWMCAST_GRP:
956 case CTRL_CMD_DELMCAST_GRP:
957 BUG_ON(!grp);
958 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
959 break;
960 default:
961 return -EINVAL;
962 }
963
964 if (IS_ERR(msg))
965 return PTR_ERR(msg);
966
967 if (!family->netnsok) {
968 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
969 0, GFP_KERNEL);
970 } else {
971 rcu_read_lock();
972 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
973 0, GFP_ATOMIC);
974 rcu_read_unlock();
975 }
976
977 return 0;
978}
979
980static struct genl_ops genl_ctrl_ops[] = {
981 {
982 .cmd = CTRL_CMD_GETFAMILY,
983 .doit = ctrl_getfamily,
984 .dumpit = ctrl_dumpfamily,
985 .policy = ctrl_policy,
986 },
987};
988
989static struct genl_multicast_group genl_ctrl_groups[] = {
990 { .name = "notify", },
991};
992
993static int genl_bind(struct net *net, int group)
994{
995 int i, err = -ENOENT;
996
997 down_read(&cb_lock);
998 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
999 struct genl_family *f;
1000
1001 list_for_each_entry(f, genl_family_chain(i), family_list) {
1002 if (group >= f->mcgrp_offset &&
1003 group < f->mcgrp_offset + f->n_mcgrps) {
1004 int fam_grp = group - f->mcgrp_offset;
1005
1006 if (!f->netnsok && net != &init_net)
1007 err = -ENOENT;
1008 else if (f->mcast_bind)
1009 err = f->mcast_bind(net, fam_grp);
1010 else
1011 err = 0;
1012 break;
1013 }
1014 }
1015 }
1016 up_read(&cb_lock);
1017
1018 return err;
1019}
1020
1021static void genl_unbind(struct net *net, int group)
1022{
1023 int i;
1024
1025 down_read(&cb_lock);
1026 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
1027 struct genl_family *f;
1028
1029 list_for_each_entry(f, genl_family_chain(i), family_list) {
1030 if (group >= f->mcgrp_offset &&
1031 group < f->mcgrp_offset + f->n_mcgrps) {
1032 int fam_grp = group - f->mcgrp_offset;
1033
1034 if (f->mcast_unbind)
1035 f->mcast_unbind(net, fam_grp);
1036 break;
1037 }
1038 }
1039 }
1040 up_read(&cb_lock);
1041}
1042
1043static int __net_init genl_pernet_init(struct net *net)
1044{
1045 struct netlink_kernel_cfg cfg = {
1046 .input = genl_rcv,
1047 .flags = NL_CFG_F_NONROOT_RECV,
1048 .bind = genl_bind,
1049 .unbind = genl_unbind,
1050 };
1051
1052 /* we'll bump the group number right afterwards */
1053 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1054
1055 if (!net->genl_sock && net_eq(net, &init_net))
1056 panic("GENL: Cannot initialize generic netlink\n");
1057
1058 if (!net->genl_sock)
1059 return -ENOMEM;
1060
1061 return 0;
1062}
1063
1064static void __net_exit genl_pernet_exit(struct net *net)
1065{
1066 netlink_kernel_release(net->genl_sock);
1067 net->genl_sock = NULL;
1068}
1069
1070static struct pernet_operations genl_pernet_ops = {
1071 .init = genl_pernet_init,
1072 .exit = genl_pernet_exit,
1073};
1074
1075static int __init genl_init(void)
1076{
1077 int i, err;
1078
1079 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1080 INIT_LIST_HEAD(&family_ht[i]);
1081
1082 err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1083 genl_ctrl_groups);
1084 if (err < 0)
1085 goto problem;
1086
1087 err = register_pernet_subsys(&genl_pernet_ops);
1088 if (err)
1089 goto problem;
1090
1091 return 0;
1092
1093problem:
1094 panic("GENL: Cannot register controller: %d\n", err);
1095}
1096
1097subsys_initcall(genl_init);
1098
1099static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1100 gfp_t flags)
1101{
1102 struct sk_buff *tmp;
1103 struct net *net, *prev = NULL;
1104 int err;
1105
1106 for_each_net_rcu(net) {
1107 if (prev) {
1108 tmp = skb_clone(skb, flags);
1109 if (!tmp) {
1110 err = -ENOMEM;
1111 goto error;
1112 }
1113 err = nlmsg_multicast(prev->genl_sock, tmp,
1114 portid, group, flags);
1115 if (err)
1116 goto error;
1117 }
1118
1119 prev = net;
1120 }
1121
1122 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1123 error:
1124 kfree_skb(skb);
1125 return err;
1126}
1127
1128int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1129 u32 portid, unsigned int group, gfp_t flags)
1130{
1131 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1132 return -EINVAL;
1133 group = family->mcgrp_offset + group;
1134 return genlmsg_mcast(skb, portid, group, flags);
1135}
1136EXPORT_SYMBOL(genlmsg_multicast_allns);
1137
1138void genl_notify(struct genl_family *family, struct sk_buff *skb,
1139 struct genl_info *info, u32 group, gfp_t flags)
1140{
1141 struct net *net = genl_info_net(info);
1142 struct sock *sk = net->genl_sock;
1143 int report = 0;
1144
1145 if (info->nlhdr)
1146 report = nlmsg_report(info->nlhdr);
1147
1148 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1149 return;
1150 group = family->mcgrp_offset + group;
1151 nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1152}
1153EXPORT_SYMBOL(genl_notify);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NETLINK Generic Netlink Family
4 *
5 * Authors: Jamal Hadi Salim
6 * Thomas Graf <tgraf@suug.ch>
7 * Johannes Berg <johannes@sipsolutions.net>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/errno.h>
14#include <linux/types.h>
15#include <linux/socket.h>
16#include <linux/string_helpers.h>
17#include <linux/skbuff.h>
18#include <linux/mutex.h>
19#include <linux/bitmap.h>
20#include <linux/rwsem.h>
21#include <linux/idr.h>
22#include <net/sock.h>
23#include <net/genetlink.h>
24
25static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
26static DECLARE_RWSEM(cb_lock);
27
28atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
29DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
30
31void genl_lock(void)
32{
33 mutex_lock(&genl_mutex);
34}
35EXPORT_SYMBOL(genl_lock);
36
37void genl_unlock(void)
38{
39 mutex_unlock(&genl_mutex);
40}
41EXPORT_SYMBOL(genl_unlock);
42
43static void genl_lock_all(void)
44{
45 down_write(&cb_lock);
46 genl_lock();
47}
48
49static void genl_unlock_all(void)
50{
51 genl_unlock();
52 up_write(&cb_lock);
53}
54
55static void genl_op_lock(const struct genl_family *family)
56{
57 if (!family->parallel_ops)
58 genl_lock();
59}
60
61static void genl_op_unlock(const struct genl_family *family)
62{
63 if (!family->parallel_ops)
64 genl_unlock();
65}
66
67static DEFINE_IDR(genl_fam_idr);
68
69/*
70 * Bitmap of multicast groups that are currently in use.
71 *
72 * To avoid an allocation at boot of just one unsigned long,
73 * declare it global instead.
74 * Bit 0 is marked as already used since group 0 is invalid.
75 * Bit 1 is marked as already used since the drop-monitor code
76 * abuses the API and thinks it can statically use group 1.
77 * That group will typically conflict with other groups that
78 * any proper users use.
79 * Bit 16 is marked as used since it's used for generic netlink
80 * and the code no longer marks pre-reserved IDs as used.
81 * Bit 17 is marked as already used since the VFS quota code
82 * also abused this API and relied on family == group ID, we
83 * cater to that by giving it a static family and group ID.
84 * Bit 18 is marked as already used since the PMCRAID driver
85 * did the same thing as the VFS quota code (maybe copied?)
86 */
87static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
88 BIT(GENL_ID_VFS_DQUOT) |
89 BIT(GENL_ID_PMCRAID);
90static unsigned long *mc_groups = &mc_group_start;
91static unsigned long mc_groups_longs = 1;
92
93/* We need the last attribute with non-zero ID therefore a 2-entry array */
94static struct nla_policy genl_policy_reject_all[] = {
95 { .type = NLA_REJECT },
96 { .type = NLA_REJECT },
97};
98
99static int genl_ctrl_event(int event, const struct genl_family *family,
100 const struct genl_multicast_group *grp,
101 int grp_id);
102
103static void
104genl_op_fill_in_reject_policy(const struct genl_family *family,
105 struct genl_ops *op)
106{
107 BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
108
109 if (op->policy || op->cmd < family->resv_start_op)
110 return;
111
112 op->policy = genl_policy_reject_all;
113 op->maxattr = 1;
114}
115
116static void
117genl_op_fill_in_reject_policy_split(const struct genl_family *family,
118 struct genl_split_ops *op)
119{
120 if (op->policy)
121 return;
122
123 op->policy = genl_policy_reject_all;
124 op->maxattr = 1;
125}
126
127static const struct genl_family *genl_family_find_byid(unsigned int id)
128{
129 return idr_find(&genl_fam_idr, id);
130}
131
132static const struct genl_family *genl_family_find_byname(char *name)
133{
134 const struct genl_family *family;
135 unsigned int id;
136
137 idr_for_each_entry(&genl_fam_idr, family, id)
138 if (strcmp(family->name, name) == 0)
139 return family;
140
141 return NULL;
142}
143
144struct genl_op_iter {
145 const struct genl_family *family;
146 struct genl_split_ops doit;
147 struct genl_split_ops dumpit;
148 int cmd_idx;
149 int entry_idx;
150 u32 cmd;
151 u8 flags;
152};
153
154static void genl_op_from_full(const struct genl_family *family,
155 unsigned int i, struct genl_ops *op)
156{
157 *op = family->ops[i];
158
159 if (!op->maxattr)
160 op->maxattr = family->maxattr;
161 if (!op->policy)
162 op->policy = family->policy;
163
164 genl_op_fill_in_reject_policy(family, op);
165}
166
167static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
168 struct genl_ops *op)
169{
170 int i;
171
172 for (i = 0; i < family->n_ops; i++)
173 if (family->ops[i].cmd == cmd) {
174 genl_op_from_full(family, i, op);
175 return 0;
176 }
177
178 return -ENOENT;
179}
180
181static void genl_op_from_small(const struct genl_family *family,
182 unsigned int i, struct genl_ops *op)
183{
184 memset(op, 0, sizeof(*op));
185 op->doit = family->small_ops[i].doit;
186 op->dumpit = family->small_ops[i].dumpit;
187 op->cmd = family->small_ops[i].cmd;
188 op->internal_flags = family->small_ops[i].internal_flags;
189 op->flags = family->small_ops[i].flags;
190 op->validate = family->small_ops[i].validate;
191
192 op->maxattr = family->maxattr;
193 op->policy = family->policy;
194
195 genl_op_fill_in_reject_policy(family, op);
196}
197
198static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
199 struct genl_ops *op)
200{
201 int i;
202
203 for (i = 0; i < family->n_small_ops; i++)
204 if (family->small_ops[i].cmd == cmd) {
205 genl_op_from_small(family, i, op);
206 return 0;
207 }
208
209 return -ENOENT;
210}
211
212static void genl_op_from_split(struct genl_op_iter *iter)
213{
214 const struct genl_family *family = iter->family;
215 int i, cnt = 0;
216
217 i = iter->entry_idx - family->n_ops - family->n_small_ops;
218
219 if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
220 iter->doit = family->split_ops[i + cnt];
221 genl_op_fill_in_reject_policy_split(family, &iter->doit);
222 cnt++;
223 } else {
224 memset(&iter->doit, 0, sizeof(iter->doit));
225 }
226
227 if (i + cnt < family->n_split_ops &&
228 family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
229 (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
230 iter->dumpit = family->split_ops[i + cnt];
231 genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
232 cnt++;
233 } else {
234 memset(&iter->dumpit, 0, sizeof(iter->dumpit));
235 }
236
237 WARN_ON(!cnt);
238 iter->entry_idx += cnt;
239}
240
241static int
242genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
243 struct genl_split_ops *op)
244{
245 int i;
246
247 for (i = 0; i < family->n_split_ops; i++)
248 if (family->split_ops[i].cmd == cmd &&
249 family->split_ops[i].flags & flag) {
250 *op = family->split_ops[i];
251 return 0;
252 }
253
254 return -ENOENT;
255}
256
257static int
258genl_cmd_full_to_split(struct genl_split_ops *op,
259 const struct genl_family *family,
260 const struct genl_ops *full, u8 flags)
261{
262 if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
263 (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
264 memset(op, 0, sizeof(*op));
265 return -ENOENT;
266 }
267
268 if (flags & GENL_CMD_CAP_DUMP) {
269 op->start = full->start;
270 op->dumpit = full->dumpit;
271 op->done = full->done;
272 } else {
273 op->pre_doit = family->pre_doit;
274 op->doit = full->doit;
275 op->post_doit = family->post_doit;
276 }
277
278 if (flags & GENL_CMD_CAP_DUMP &&
279 full->validate & GENL_DONT_VALIDATE_DUMP) {
280 op->policy = NULL;
281 op->maxattr = 0;
282 } else {
283 op->policy = full->policy;
284 op->maxattr = full->maxattr;
285 }
286
287 op->cmd = full->cmd;
288 op->internal_flags = full->internal_flags;
289 op->flags = full->flags;
290 op->validate = full->validate;
291
292 /* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
293 op->flags |= flags;
294
295 return 0;
296}
297
298/* Must make sure that op is initialized to 0 on failure */
299static int
300genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
301 struct genl_split_ops *op)
302{
303 struct genl_ops full;
304 int err;
305
306 err = genl_get_cmd_full(cmd, family, &full);
307 if (err == -ENOENT)
308 err = genl_get_cmd_small(cmd, family, &full);
309 /* Found one of legacy forms */
310 if (err == 0)
311 return genl_cmd_full_to_split(op, family, &full, flags);
312
313 err = genl_get_cmd_split(cmd, flags, family, op);
314 if (err)
315 memset(op, 0, sizeof(*op));
316 return err;
317}
318
319/* For policy dumping only, get ops of both do and dump.
320 * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
321 */
322static int
323genl_get_cmd_both(u32 cmd, const struct genl_family *family,
324 struct genl_split_ops *doit, struct genl_split_ops *dumpit)
325{
326 int err1, err2;
327
328 err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
329 err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
330
331 return err1 && err2 ? -ENOENT : 0;
332}
333
334static bool
335genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
336{
337 iter->family = family;
338 iter->cmd_idx = 0;
339 iter->entry_idx = 0;
340
341 iter->flags = 0;
342
343 return iter->family->n_ops +
344 iter->family->n_small_ops +
345 iter->family->n_split_ops;
346}
347
348static bool genl_op_iter_next(struct genl_op_iter *iter)
349{
350 const struct genl_family *family = iter->family;
351 bool legacy_op = true;
352 struct genl_ops op;
353
354 if (iter->entry_idx < family->n_ops) {
355 genl_op_from_full(family, iter->entry_idx, &op);
356 } else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
357 genl_op_from_small(family, iter->entry_idx - family->n_ops,
358 &op);
359 } else if (iter->entry_idx <
360 family->n_ops + family->n_small_ops + family->n_split_ops) {
361 legacy_op = false;
362 /* updates entry_idx */
363 genl_op_from_split(iter);
364 } else {
365 return false;
366 }
367
368 iter->cmd_idx++;
369
370 if (legacy_op) {
371 iter->entry_idx++;
372
373 genl_cmd_full_to_split(&iter->doit, family,
374 &op, GENL_CMD_CAP_DO);
375 genl_cmd_full_to_split(&iter->dumpit, family,
376 &op, GENL_CMD_CAP_DUMP);
377 }
378
379 iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
380 iter->flags = iter->doit.flags | iter->dumpit.flags;
381
382 return true;
383}
384
385static void
386genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
387{
388 *dst = *src;
389}
390
391static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
392{
393 return iter->cmd_idx;
394}
395
396static int genl_allocate_reserve_groups(int n_groups, int *first_id)
397{
398 unsigned long *new_groups;
399 int start = 0;
400 int i;
401 int id;
402 bool fits;
403
404 do {
405 if (start == 0)
406 id = find_first_zero_bit(mc_groups,
407 mc_groups_longs *
408 BITS_PER_LONG);
409 else
410 id = find_next_zero_bit(mc_groups,
411 mc_groups_longs * BITS_PER_LONG,
412 start);
413
414 fits = true;
415 for (i = id;
416 i < min_t(int, id + n_groups,
417 mc_groups_longs * BITS_PER_LONG);
418 i++) {
419 if (test_bit(i, mc_groups)) {
420 start = i;
421 fits = false;
422 break;
423 }
424 }
425
426 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
427 unsigned long new_longs = mc_groups_longs +
428 BITS_TO_LONGS(n_groups);
429 size_t nlen = new_longs * sizeof(unsigned long);
430
431 if (mc_groups == &mc_group_start) {
432 new_groups = kzalloc(nlen, GFP_KERNEL);
433 if (!new_groups)
434 return -ENOMEM;
435 mc_groups = new_groups;
436 *mc_groups = mc_group_start;
437 } else {
438 new_groups = krealloc(mc_groups, nlen,
439 GFP_KERNEL);
440 if (!new_groups)
441 return -ENOMEM;
442 mc_groups = new_groups;
443 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
444 mc_groups[mc_groups_longs + i] = 0;
445 }
446 mc_groups_longs = new_longs;
447 }
448 } while (!fits);
449
450 for (i = id; i < id + n_groups; i++)
451 set_bit(i, mc_groups);
452 *first_id = id;
453 return 0;
454}
455
456static struct genl_family genl_ctrl;
457
458static int genl_validate_assign_mc_groups(struct genl_family *family)
459{
460 int first_id;
461 int n_groups = family->n_mcgrps;
462 int err = 0, i;
463 bool groups_allocated = false;
464
465 if (!n_groups)
466 return 0;
467
468 for (i = 0; i < n_groups; i++) {
469 const struct genl_multicast_group *grp = &family->mcgrps[i];
470
471 if (WARN_ON(grp->name[0] == '\0'))
472 return -EINVAL;
473 if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
474 return -EINVAL;
475 }
476
477 /* special-case our own group and hacks */
478 if (family == &genl_ctrl) {
479 first_id = GENL_ID_CTRL;
480 BUG_ON(n_groups != 1);
481 } else if (strcmp(family->name, "NET_DM") == 0) {
482 first_id = 1;
483 BUG_ON(n_groups != 1);
484 } else if (family->id == GENL_ID_VFS_DQUOT) {
485 first_id = GENL_ID_VFS_DQUOT;
486 BUG_ON(n_groups != 1);
487 } else if (family->id == GENL_ID_PMCRAID) {
488 first_id = GENL_ID_PMCRAID;
489 BUG_ON(n_groups != 1);
490 } else {
491 groups_allocated = true;
492 err = genl_allocate_reserve_groups(n_groups, &first_id);
493 if (err)
494 return err;
495 }
496
497 family->mcgrp_offset = first_id;
498
499 /* if still initializing, can't and don't need to realloc bitmaps */
500 if (!init_net.genl_sock)
501 return 0;
502
503 if (family->netnsok) {
504 struct net *net;
505
506 netlink_table_grab();
507 rcu_read_lock();
508 for_each_net_rcu(net) {
509 err = __netlink_change_ngroups(net->genl_sock,
510 mc_groups_longs * BITS_PER_LONG);
511 if (err) {
512 /*
513 * No need to roll back, can only fail if
514 * memory allocation fails and then the
515 * number of _possible_ groups has been
516 * increased on some sockets which is ok.
517 */
518 break;
519 }
520 }
521 rcu_read_unlock();
522 netlink_table_ungrab();
523 } else {
524 err = netlink_change_ngroups(init_net.genl_sock,
525 mc_groups_longs * BITS_PER_LONG);
526 }
527
528 if (groups_allocated && err) {
529 for (i = 0; i < family->n_mcgrps; i++)
530 clear_bit(family->mcgrp_offset + i, mc_groups);
531 }
532
533 return err;
534}
535
536static void genl_unregister_mc_groups(const struct genl_family *family)
537{
538 struct net *net;
539 int i;
540
541 netlink_table_grab();
542 rcu_read_lock();
543 for_each_net_rcu(net) {
544 for (i = 0; i < family->n_mcgrps; i++)
545 __netlink_clear_multicast_users(
546 net->genl_sock, family->mcgrp_offset + i);
547 }
548 rcu_read_unlock();
549 netlink_table_ungrab();
550
551 for (i = 0; i < family->n_mcgrps; i++) {
552 int grp_id = family->mcgrp_offset + i;
553
554 if (grp_id != 1)
555 clear_bit(grp_id, mc_groups);
556 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
557 &family->mcgrps[i], grp_id);
558 }
559}
560
561static bool genl_split_op_check(const struct genl_split_ops *op)
562{
563 if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
564 GENL_CMD_CAP_DUMP)) != 1))
565 return true;
566 return false;
567}
568
569static int genl_validate_ops(const struct genl_family *family)
570{
571 struct genl_op_iter i, j;
572 unsigned int s;
573
574 if (WARN_ON(family->n_ops && !family->ops) ||
575 WARN_ON(family->n_small_ops && !family->small_ops) ||
576 WARN_ON(family->n_split_ops && !family->split_ops))
577 return -EINVAL;
578
579 for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
580 if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
581 return -EINVAL;
582
583 if (WARN_ON(i.cmd >= family->resv_start_op &&
584 (i.doit.validate || i.dumpit.validate)))
585 return -EINVAL;
586
587 genl_op_iter_copy(&j, &i);
588 while (genl_op_iter_next(&j)) {
589 if (i.cmd == j.cmd)
590 return -EINVAL;
591 }
592 }
593
594 if (family->n_split_ops) {
595 if (genl_split_op_check(&family->split_ops[0]))
596 return -EINVAL;
597 }
598
599 for (s = 1; s < family->n_split_ops; s++) {
600 const struct genl_split_ops *a, *b;
601
602 a = &family->split_ops[s - 1];
603 b = &family->split_ops[s];
604
605 if (genl_split_op_check(b))
606 return -EINVAL;
607
608 /* Check sort order */
609 if (a->cmd < b->cmd) {
610 continue;
611 } else if (a->cmd > b->cmd) {
612 WARN_ON(1);
613 return -EINVAL;
614 }
615
616 if (a->internal_flags != b->internal_flags ||
617 ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
618 GENL_CMD_CAP_DUMP))) {
619 WARN_ON(1);
620 return -EINVAL;
621 }
622
623 if ((a->flags & GENL_CMD_CAP_DO) &&
624 (b->flags & GENL_CMD_CAP_DUMP))
625 continue;
626
627 WARN_ON(1);
628 return -EINVAL;
629 }
630
631 return 0;
632}
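/*
 * Illustrative sketch, not part of the original file: a split_ops array
 * that passes the checks above (hypothetical "foo" family) is sorted by
 * cmd, gives each entry exactly one of GENL_CMD_CAP_DO/GENL_CMD_CAP_DUMP,
 * and puts the doit entry before the dumpit entry of the same cmd:
 *
 *	static const struct genl_split_ops foo_split_ops[] = {
 *		{ .cmd = FOO_CMD_GET, .doit = foo_get_doit,
 *		  .flags = GENL_CMD_CAP_DO, },
 *		{ .cmd = FOO_CMD_GET, .dumpit = foo_get_dumpit,
 *		  .flags = GENL_CMD_CAP_DUMP, },
 *		{ .cmd = FOO_CMD_SET, .doit = foo_set_doit,
 *		  .flags = GENL_CMD_CAP_DO, },
 *	};
 */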
633
634static void *genl_sk_priv_alloc(struct genl_family *family)
635{
636 void *priv;
637
638 priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
639 if (!priv)
640 return ERR_PTR(-ENOMEM);
641
642 if (family->sock_priv_init)
643 family->sock_priv_init(priv);
644
645 return priv;
646}
647
648static void genl_sk_priv_free(const struct genl_family *family, void *priv)
649{
650 if (family->sock_priv_destroy)
651 family->sock_priv_destroy(priv);
652 kfree(priv);
653}
654
655static int genl_sk_privs_alloc(struct genl_family *family)
656{
657 if (!family->sock_priv_size)
658 return 0;
659
660 family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
661 if (!family->sock_privs)
662 return -ENOMEM;
663 xa_init(family->sock_privs);
664 return 0;
665}
666
667static void genl_sk_privs_free(const struct genl_family *family)
668{
669 unsigned long id;
670 void *priv;
671
672 if (!family->sock_priv_size)
673 return;
674
675 xa_for_each(family->sock_privs, id, priv)
676 genl_sk_priv_free(family, priv);
677
678 xa_destroy(family->sock_privs);
679 kfree(family->sock_privs);
680}
681
682static void genl_sk_priv_free_by_sock(struct genl_family *family,
683 struct sock *sk)
684{
685 void *priv;
686
687 if (!family->sock_priv_size)
688 return;
689 priv = xa_erase(family->sock_privs, (unsigned long) sk);
690 if (!priv)
691 return;
692 genl_sk_priv_free(family, priv);
693}
694
695static void genl_release(struct sock *sk, unsigned long *groups)
696{
697 struct genl_family *family;
698 unsigned int id;
699
700 down_read(&cb_lock);
701
702 idr_for_each_entry(&genl_fam_idr, family, id)
703 genl_sk_priv_free_by_sock(family, sk);
704
705 up_read(&cb_lock);
706}
707
708/**
 709 * __genl_sk_priv_get - Get family private pointer for socket, if it exists
710 *
711 * @family: family
712 * @sk: socket
713 *
 714 * Look up the private memory for a Generic netlink family and the specified socket.
715 *
716 * Caller should make sure this is called in RCU read locked section.
717 *
718 * Return: valid pointer on success, otherwise negative error value
719 * encoded by ERR_PTR(), NULL in case priv does not exist.
720 */
721void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
722{
723 if (WARN_ON_ONCE(!family->sock_privs))
724 return ERR_PTR(-EINVAL);
725 return xa_load(family->sock_privs, (unsigned long) sk);
726}
727
728/**
729 * genl_sk_priv_get - Get family private pointer for socket
730 *
731 * @family: family
732 * @sk: socket
733 *
 734 * Look up the private memory for a Generic netlink family and the specified socket.
735 * Allocate the private memory in case it was not already done.
736 *
737 * Return: valid pointer on success, otherwise negative error value
738 * encoded by ERR_PTR().
739 */
740void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
741{
742 void *priv, *old_priv;
743
744 priv = __genl_sk_priv_get(family, sk);
745 if (priv)
746 return priv;
747
748 /* priv for the family does not exist so far, create it. */
749
750 priv = genl_sk_priv_alloc(family);
751 if (IS_ERR(priv))
752 return ERR_CAST(priv);
753
754 old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
755 priv, GFP_KERNEL);
756 if (old_priv) {
757 genl_sk_priv_free(family, priv);
758 if (xa_is_err(old_priv))
759 return ERR_PTR(xa_err(old_priv));
760 /* Race happened, priv for the socket was already inserted. */
761 return old_priv;
762 }
763 return priv;
764}
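/*
 * Illustrative usage sketch, not part of the original file: a family that
 * sets sock_priv_size can attach per-socket state from a doit handler,
 * e.g. (foo_family and struct foo_sock_priv are placeholders):
 *
 *	struct foo_sock_priv *priv;
 *
 *	priv = genl_sk_priv_get(&foo_family, NETLINK_CB(skb).sk);
 *	if (IS_ERR(priv))
 *		return PTR_ERR(priv);
 *
 * and later read it under RCU with __genl_sk_priv_get(); the memory is
 * freed again from genl_release() when the owning socket goes away.
 */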
765
766/**
767 * genl_register_family - register a generic netlink family
768 * @family: generic netlink family
769 *
770 * Registers the specified family after validating it first. Only one
771 * family may be registered with the same family name or identifier.
772 *
773 * The family's ops, multicast groups and module pointer must already
774 * be assigned.
775 *
776 * Return 0 on success or a negative error code.
777 */
778int genl_register_family(struct genl_family *family)
779{
780 int err, i;
781 int start = GENL_START_ALLOC, end = GENL_MAX_ID;
782
783 err = genl_validate_ops(family);
784 if (err)
785 return err;
786
787 genl_lock_all();
788
789 if (genl_family_find_byname(family->name)) {
790 err = -EEXIST;
791 goto errout_locked;
792 }
793
794 err = genl_sk_privs_alloc(family);
795 if (err)
796 goto errout_locked;
797
798 /*
799 * Sadly, a few cases need to be special-cased
800 * due to them having previously abused the API
801 * and having used their family ID also as their
802 * multicast group ID, so we use reserved IDs
803 * for both to be sure we can do that mapping.
804 */
805 if (family == &genl_ctrl) {
806 /* and this needs to be special for initial family lookups */
807 start = end = GENL_ID_CTRL;
808 } else if (strcmp(family->name, "pmcraid") == 0) {
809 start = end = GENL_ID_PMCRAID;
810 } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
811 start = end = GENL_ID_VFS_DQUOT;
812 }
813
814 family->id = idr_alloc_cyclic(&genl_fam_idr, family,
815 start, end + 1, GFP_KERNEL);
816 if (family->id < 0) {
817 err = family->id;
818 goto errout_sk_privs_free;
819 }
820
821 err = genl_validate_assign_mc_groups(family);
822 if (err)
823 goto errout_remove;
824
825 genl_unlock_all();
826
827 /* send all events */
828 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
829 for (i = 0; i < family->n_mcgrps; i++)
830 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
831 &family->mcgrps[i], family->mcgrp_offset + i);
832
833 return 0;
834
835errout_remove:
836 idr_remove(&genl_fam_idr, family->id);
837errout_sk_privs_free:
838 genl_sk_privs_free(family);
839errout_locked:
840 genl_unlock_all();
841 return err;
842}
843EXPORT_SYMBOL(genl_register_family);
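/*
 * Illustrative usage sketch, not part of the original file: with this API
 * the family ID is always picked by the IDR above, and resv_start_op opts
 * newer commands into strict validation. The "foo"/FOO_* names are
 * placeholders:
 *
 *	static struct genl_family foo_family __ro_after_init = {
 *		.name		= "foo",
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *		.policy		= foo_policy,
 *		.module		= THIS_MODULE,
 *		.ops		= foo_ops,
 *		.n_ops		= ARRAY_SIZE(foo_ops),
 *		.resv_start_op	= FOO_CMD_LAST + 1,
 *	};
 *
 *	err = genl_register_family(&foo_family);
 */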
844
845/**
846 * genl_unregister_family - unregister generic netlink family
847 * @family: generic netlink family
848 *
849 * Unregisters the specified family.
850 *
851 * Returns 0 on success or a negative error code.
852 */
853int genl_unregister_family(const struct genl_family *family)
854{
855 genl_lock_all();
856
857 if (!genl_family_find_byid(family->id)) {
858 genl_unlock_all();
859 return -ENOENT;
860 }
861
862 genl_unregister_mc_groups(family);
863
864 idr_remove(&genl_fam_idr, family->id);
865
866 up_write(&cb_lock);
867 wait_event(genl_sk_destructing_waitq,
868 atomic_read(&genl_sk_destructing_cnt) == 0);
869
870 genl_sk_privs_free(family);
871
872 genl_unlock();
873
874 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
875
876 return 0;
877}
878EXPORT_SYMBOL(genl_unregister_family);
879
880/**
881 * genlmsg_put - Add generic netlink header to netlink message
882 * @skb: socket buffer holding the message
883 * @portid: netlink portid the message is addressed to
884 * @seq: sequence number (usually the one of the sender)
885 * @family: generic netlink family
886 * @flags: netlink message flags
887 * @cmd: generic netlink command
888 *
889 * Returns pointer to user specific header
890 */
891void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
892 const struct genl_family *family, int flags, u8 cmd)
893{
894 struct nlmsghdr *nlh;
895 struct genlmsghdr *hdr;
896
897 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
898 family->hdrsize, flags);
899 if (nlh == NULL)
900 return NULL;
901
902 hdr = nlmsg_data(nlh);
903 hdr->cmd = cmd;
904 hdr->version = family->version;
905 hdr->reserved = 0;
906
907 return (char *) hdr + GENL_HDRLEN;
908}
909EXPORT_SYMBOL(genlmsg_put);
910
911static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
912{
913 return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
914}
915
916static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
917{
918 kfree(info);
919}
920
921static struct nlattr **
922genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
923 struct nlmsghdr *nlh,
924 struct netlink_ext_ack *extack,
925 const struct genl_split_ops *ops,
926 int hdrlen,
927 enum genl_validate_flags no_strict_flag)
928{
929 enum netlink_validation validate = ops->validate & no_strict_flag ?
930 NL_VALIDATE_LIBERAL :
931 NL_VALIDATE_STRICT;
932 struct nlattr **attrbuf;
933 int err;
934
935 if (!ops->maxattr)
936 return NULL;
937
938 attrbuf = kmalloc_array(ops->maxattr + 1,
939 sizeof(struct nlattr *), GFP_KERNEL);
940 if (!attrbuf)
941 return ERR_PTR(-ENOMEM);
942
943 err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
944 validate, extack);
945 if (err) {
946 kfree(attrbuf);
947 return ERR_PTR(err);
948 }
949 return attrbuf;
950}
951
952static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
953{
954 kfree(attrbuf);
955}
956
957struct genl_start_context {
958 const struct genl_family *family;
959 struct nlmsghdr *nlh;
960 struct netlink_ext_ack *extack;
961 const struct genl_split_ops *ops;
962 int hdrlen;
963};
964
965static int genl_start(struct netlink_callback *cb)
966{
967 struct genl_start_context *ctx = cb->data;
968 const struct genl_split_ops *ops;
969 struct genl_dumpit_info *info;
970 struct nlattr **attrs = NULL;
971 int rc = 0;
972
973 ops = ctx->ops;
974 if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
975 ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
976 return -EINVAL;
977
978 attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
979 ops, ctx->hdrlen,
980 GENL_DONT_VALIDATE_DUMP_STRICT);
981 if (IS_ERR(attrs))
982 return PTR_ERR(attrs);
983
984 info = genl_dumpit_info_alloc();
985 if (!info) {
986 genl_family_rcv_msg_attrs_free(attrs);
987 return -ENOMEM;
988 }
989 info->op = *ops;
990 info->info.family = ctx->family;
991 info->info.snd_seq = cb->nlh->nlmsg_seq;
992 info->info.snd_portid = NETLINK_CB(cb->skb).portid;
993 info->info.nlhdr = cb->nlh;
994 info->info.genlhdr = nlmsg_data(cb->nlh);
995 info->info.attrs = attrs;
996 genl_info_net_set(&info->info, sock_net(cb->skb->sk));
997 info->info.extack = cb->extack;
998 memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));
999
1000 cb->data = info;
1001 if (ops->start) {
1002 genl_op_lock(ctx->family);
1003 rc = ops->start(cb);
1004 genl_op_unlock(ctx->family);
1005 }
1006
1007 if (rc) {
1008 genl_family_rcv_msg_attrs_free(info->info.attrs);
1009 genl_dumpit_info_free(info);
1010 cb->data = NULL;
1011 }
1012 return rc;
1013}
1014
1015static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1016{
1017 struct genl_dumpit_info *dump_info = cb->data;
1018 const struct genl_split_ops *ops = &dump_info->op;
1019 struct genl_info *info = &dump_info->info;
1020 int rc;
1021
1022 info->extack = cb->extack;
1023
1024 genl_op_lock(info->family);
1025 rc = ops->dumpit(skb, cb);
1026 genl_op_unlock(info->family);
1027 return rc;
1028}
1029
1030static int genl_done(struct netlink_callback *cb)
1031{
1032 struct genl_dumpit_info *dump_info = cb->data;
1033 const struct genl_split_ops *ops = &dump_info->op;
1034 struct genl_info *info = &dump_info->info;
1035 int rc = 0;
1036
1037 info->extack = cb->extack;
1038
1039 if (ops->done) {
1040 genl_op_lock(info->family);
1041 rc = ops->done(cb);
1042 genl_op_unlock(info->family);
1043 }
1044 genl_family_rcv_msg_attrs_free(info->attrs);
1045 genl_dumpit_info_free(dump_info);
1046 return rc;
1047}
1048
1049static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
1050 struct sk_buff *skb,
1051 struct nlmsghdr *nlh,
1052 struct netlink_ext_ack *extack,
1053 const struct genl_split_ops *ops,
1054 int hdrlen, struct net *net)
1055{
1056 struct genl_start_context ctx;
1057 struct netlink_dump_control c = {
1058 .module = family->module,
1059 .data = &ctx,
1060 .start = genl_start,
1061 .dump = genl_dumpit,
1062 .done = genl_done,
1063 .extack = extack,
1064 };
1065 int err;
1066
1067 ctx.family = family;
1068 ctx.nlh = nlh;
1069 ctx.extack = extack;
1070 ctx.ops = ops;
1071 ctx.hdrlen = hdrlen;
1072
1073 genl_op_unlock(family);
1074 err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1075 genl_op_lock(family);
1076
1077 return err;
1078}
1079
1080static int genl_family_rcv_msg_doit(const struct genl_family *family,
1081 struct sk_buff *skb,
1082 struct nlmsghdr *nlh,
1083 struct netlink_ext_ack *extack,
1084 const struct genl_split_ops *ops,
1085 int hdrlen, struct net *net)
1086{
1087 struct nlattr **attrbuf;
1088 struct genl_info info;
1089 int err;
1090
1091 attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
1092 ops, hdrlen,
1093 GENL_DONT_VALIDATE_STRICT);
1094 if (IS_ERR(attrbuf))
1095 return PTR_ERR(attrbuf);
1096
1097 info.snd_seq = nlh->nlmsg_seq;
1098 info.snd_portid = NETLINK_CB(skb).portid;
1099 info.family = family;
1100 info.nlhdr = nlh;
1101 info.genlhdr = nlmsg_data(nlh);
1102 info.attrs = attrbuf;
1103 info.extack = extack;
1104 genl_info_net_set(&info, net);
1105 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
1106
1107 if (ops->pre_doit) {
1108 err = ops->pre_doit(ops, skb, &info);
1109 if (err)
1110 goto out;
1111 }
1112
1113 err = ops->doit(skb, &info);
1114
1115 if (ops->post_doit)
1116 ops->post_doit(ops, skb, &info);
1117
1118out:
1119 genl_family_rcv_msg_attrs_free(attrbuf);
1120
1121 return err;
1122}
1123
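/*
 * Strict header validation, applied only to commands numbered at or above
 * the family's resv_start_op: the reserved byte of genlmsghdr must be zero
 * and nlmsg_flags may only carry NLM_F_DUMP, NLM_F_REQUEST, NLM_F_ACK and
 * NLM_F_ECHO.
 */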
1124static int genl_header_check(const struct genl_family *family,
1125 struct nlmsghdr *nlh, struct genlmsghdr *hdr,
1126 struct netlink_ext_ack *extack)
1127{
1128 u16 flags;
1129
1130 /* Only for commands added after we started validating */
1131 if (hdr->cmd < family->resv_start_op)
1132 return 0;
1133
1134 if (hdr->reserved) {
1135 NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
1136 return -EINVAL;
1137 }
1138
1139 /* Old netlink flags have pretty loose semantics; allow only the flags
1140 * consumed by the core, where we can enforce the meaning.
1141 */
1142 flags = nlh->nlmsg_flags;
1143 if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
1144 flags &= ~NLM_F_DUMP;
1145 if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
1146 NL_SET_ERR_MSG(extack,
1147 "ambiguous or reserved bits set in nlmsg_flags");
1148 return -EINVAL;
1149 }
1150
1151 return 0;
1152}
1153
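/*
 * Per-family handling of one request: check netns visibility and the
 * header, resolve the command to a split op, enforce GENL_ADMIN_PERM /
 * GENL_UNS_ADMIN_PERM, then branch into the dumpit or doit path above.
 */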
1154static int genl_family_rcv_msg(const struct genl_family *family,
1155 struct sk_buff *skb,
1156 struct nlmsghdr *nlh,
1157 struct netlink_ext_ack *extack)
1158{
1159 struct net *net = sock_net(skb->sk);
1160 struct genlmsghdr *hdr = nlmsg_data(nlh);
1161 struct genl_split_ops op;
1162 int hdrlen;
1163 u8 flags;
1164
1165 /* this family doesn't exist in this netns */
1166 if (!family->netnsok && !net_eq(net, &init_net))
1167 return -ENOENT;
1168
1169 hdrlen = GENL_HDRLEN + family->hdrsize;
1170 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
1171 return -EINVAL;
1172
1173 if (genl_header_check(family, nlh, hdr, extack))
1174 return -EINVAL;
1175
1176 flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
1177 GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
1178 if (genl_get_cmd(hdr->cmd, flags, family, &op))
1179 return -EOPNOTSUPP;
1180
1181 if ((op.flags & GENL_ADMIN_PERM) &&
1182 !netlink_capable(skb, CAP_NET_ADMIN))
1183 return -EPERM;
1184
1185 if ((op.flags & GENL_UNS_ADMIN_PERM) &&
1186 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1187 return -EPERM;
1188
1189 if (flags & GENL_CMD_CAP_DUMP)
1190 return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
1191 &op, hdrlen, net);
1192 else
1193 return genl_family_rcv_msg_doit(family, skb, nlh, extack,
1194 &op, hdrlen, net);
1195}
1196
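/*
 * Top-level receive path: nlmsg_type is the family id.  cb_lock is held
 * for read across the whole skb (taken in genl_rcv() below) and the family
 * op lock is taken per message; family registration and unregistration
 * take cb_lock for write, so they cannot race with dispatch.
 */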
1197static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1198 struct netlink_ext_ack *extack)
1199{
1200 const struct genl_family *family;
1201 int err;
1202
1203 family = genl_family_find_byid(nlh->nlmsg_type);
1204 if (family == NULL)
1205 return -ENOENT;
1206
1207 genl_op_lock(family);
1208 err = genl_family_rcv_msg(family, skb, nlh, extack);
1209 genl_op_unlock(family);
1210
1211 return err;
1212}
1213
1214static void genl_rcv(struct sk_buff *skb)
1215{
1216 down_read(&cb_lock);
1217 netlink_rcv_skb(skb, &genl_rcv_msg);
1218 up_read(&cb_lock);
1219}
1220
1221/**************************************************************************
1222 * Controller
1223 **************************************************************************/
1224
1225static struct genl_family genl_ctrl;
1226
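/*
 * Build the body of a CTRL_CMD_NEWFAMILY/DELFAMILY message: family name,
 * id, version, header size and maxattr, plus nested lists of the family's
 * operations (with their capability flags) and multicast groups.
 */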
1227static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
1228 u32 flags, struct sk_buff *skb, u8 cmd)
1229{
1230 struct genl_op_iter i;
1231 void *hdr;
1232
1233 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1234 if (hdr == NULL)
1235 return -EMSGSIZE;
1236
1237 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1238 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
1239 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
1240 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
1241 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
1242 goto nla_put_failure;
1243
1244 if (genl_op_iter_init(family, &i)) {
1245 struct nlattr *nla_ops;
1246
1247 nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
1248 if (nla_ops == NULL)
1249 goto nla_put_failure;
1250
1251 while (genl_op_iter_next(&i)) {
1252 struct nlattr *nest;
1253 u32 op_flags;
1254
1255 op_flags = i.flags;
1256 if (i.doit.policy || i.dumpit.policy)
1257 op_flags |= GENL_CMD_CAP_HASPOL;
1258
1259 nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
1260 if (nest == NULL)
1261 goto nla_put_failure;
1262
1263 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
1264 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
1265 goto nla_put_failure;
1266
1267 nla_nest_end(skb, nest);
1268 }
1269
1270 nla_nest_end(skb, nla_ops);
1271 }
1272
1273 if (family->n_mcgrps) {
1274 struct nlattr *nla_grps;
1275 int i;
1276
1277 nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1278 if (nla_grps == NULL)
1279 goto nla_put_failure;
1280
1281 for (i = 0; i < family->n_mcgrps; i++) {
1282 struct nlattr *nest;
1283 const struct genl_multicast_group *grp;
1284
1285 grp = &family->mcgrps[i];
1286
1287 nest = nla_nest_start_noflag(skb, i + 1);
1288 if (nest == NULL)
1289 goto nla_put_failure;
1290
1291 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
1292 family->mcgrp_offset + i) ||
1293 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1294 grp->name))
1295 goto nla_put_failure;
1296
1297 nla_nest_end(skb, nest);
1298 }
1299 nla_nest_end(skb, nla_grps);
1300 }
1301
1302 genlmsg_end(skb, hdr);
1303 return 0;
1304
1305nla_put_failure:
1306 genlmsg_cancel(skb, hdr);
1307 return -EMSGSIZE;
1308}
1309
1310static int ctrl_fill_mcgrp_info(const struct genl_family *family,
1311 const struct genl_multicast_group *grp,
1312 int grp_id, u32 portid, u32 seq, u32 flags,
1313 struct sk_buff *skb, u8 cmd)
1314{
1315 void *hdr;
1316 struct nlattr *nla_grps;
1317 struct nlattr *nest;
1318
1319 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1320 if (hdr == NULL)
1321 return -EMSGSIZE;
1322
1323 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1324 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
1325 goto nla_put_failure;
1326
1327 nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1328 if (nla_grps == NULL)
1329 goto nla_put_failure;
1330
1331 nest = nla_nest_start_noflag(skb, 1);
1332 if (nest == NULL)
1333 goto nla_put_failure;
1334
1335 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
1336 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1337 grp->name))
1338 goto nla_put_failure;
1339
1340 nla_nest_end(skb, nest);
1341 nla_nest_end(skb, nla_grps);
1342
1343 genlmsg_end(skb, hdr);
1344 return 0;
1345
1346nla_put_failure:
1347 genlmsg_cancel(skb, hdr);
1348 return -EMSGSIZE;
1349}
1350
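/*
 * CTRL_CMD_GETFAMILY dump: walk the family IDR and emit one NEWFAMILY
 * record per family visible in this netns.  cb->args[0] counts families
 * already dumped so a partial dump resumes where the previous skb filled
 * up.
 */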
1351static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
1352{
1353 int n = 0;
1354 struct genl_family *rt;
1355 struct net *net = sock_net(skb->sk);
1356 int fams_to_skip = cb->args[0];
1357 unsigned int id;
1358 int err = 0;
1359
1360 idr_for_each_entry(&genl_fam_idr, rt, id) {
1361 if (!rt->netnsok && !net_eq(net, &init_net))
1362 continue;
1363
1364 if (n++ < fams_to_skip)
1365 continue;
1366
1367 err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
1368 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1369 skb, CTRL_CMD_NEWFAMILY);
1370 if (err) {
1371 n--;
1372 break;
1373 }
1374 }
1375
1376 cb->args[0] = n;
1377 return err;
1378}
1379
1380static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
1381 u32 portid, int seq, u8 cmd)
1382{
1383 struct sk_buff *skb;
1384 int err;
1385
1386 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1387 if (skb == NULL)
1388 return ERR_PTR(-ENOBUFS);
1389
1390 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
1391 if (err < 0) {
1392 nlmsg_free(skb);
1393 return ERR_PTR(err);
1394 }
1395
1396 return skb;
1397}
1398
1399static struct sk_buff *
1400ctrl_build_mcgrp_msg(const struct genl_family *family,
1401 const struct genl_multicast_group *grp,
1402 int grp_id, u32 portid, int seq, u8 cmd)
1403{
1404 struct sk_buff *skb;
1405 int err;
1406
1407 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1408 if (skb == NULL)
1409 return ERR_PTR(-ENOBUFS);
1410
1411 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
1412 seq, 0, skb, cmd);
1413 if (err < 0) {
1414 nlmsg_free(skb);
1415 return ERR_PTR(err);
1416 }
1417
1418 return skb;
1419}
1420
1421static const struct nla_policy ctrl_policy_family[] = {
1422 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
1423 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
1424 .len = GENL_NAMSIZ - 1 },
1425};
1426
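/*
 * CTRL_CMD_GETFAMILY doit: resolve a family by numeric id or by name and
 * reply with a single NEWFAMILY message.  A lookup by name may drop the
 * locks and try request_module() so userspace can resolve families whose
 * module is not loaded yet; the alias follows the usual
 * "net-pf-16-proto-16-family-<name>" pattern (PF_NETLINK and
 * NETLINK_GENERIC are both 16).
 */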
1427static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
1428{
1429 struct sk_buff *msg;
1430 const struct genl_family *res = NULL;
1431 int err = -EINVAL;
1432
1433 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
1434 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
1435 res = genl_family_find_byid(id);
1436 err = -ENOENT;
1437 }
1438
1439 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
1440 char *name;
1441
1442 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
1443 res = genl_family_find_byname(name);
1444#ifdef CONFIG_MODULES
1445 if (res == NULL) {
1446 genl_unlock();
1447 up_read(&cb_lock);
1448 request_module("net-pf-%d-proto-%d-family-%s",
1449 PF_NETLINK, NETLINK_GENERIC, name);
1450 down_read(&cb_lock);
1451 genl_lock();
1452 res = genl_family_find_byname(name);
1453 }
1454#endif
1455 err = -ENOENT;
1456 }
1457
1458 if (res == NULL)
1459 return err;
1460
1461 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
1462 /* family doesn't exist here */
1463 return -ENOENT;
1464 }
1465
1466 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
1467 CTRL_CMD_NEWFAMILY);
1468 if (IS_ERR(msg))
1469 return PTR_ERR(msg);
1470
1471 return genlmsg_reply(msg, info);
1472}
1473
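/*
 * Notify listeners of the controller's "notify" multicast group about
 * family and multicast group (un)registration.  Notifications go to all
 * network namespaces only if the family itself is netns aware; otherwise
 * only init_net is told.
 */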
1474static int genl_ctrl_event(int event, const struct genl_family *family,
1475 const struct genl_multicast_group *grp,
1476 int grp_id)
1477{
1478 struct sk_buff *msg;
1479
1480 /* genl is still initialising */
1481 if (!init_net.genl_sock)
1482 return 0;
1483
1484 switch (event) {
1485 case CTRL_CMD_NEWFAMILY:
1486 case CTRL_CMD_DELFAMILY:
1487 WARN_ON(grp);
1488 msg = ctrl_build_family_msg(family, 0, 0, event);
1489 break;
1490 case CTRL_CMD_NEWMCAST_GRP:
1491 case CTRL_CMD_DELMCAST_GRP:
1492 BUG_ON(!grp);
1493 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
1494 break;
1495 default:
1496 return -EINVAL;
1497 }
1498
1499 if (IS_ERR(msg))
1500 return PTR_ERR(msg);
1501
1502 if (!family->netnsok) {
1503 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
1504 0, GFP_KERNEL);
1505 } else {
1506 rcu_read_lock();
1507 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
1508 0, GFP_ATOMIC);
1509 rcu_read_unlock();
1510 }
1511
1512 return 0;
1513}
1514
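/*
 * Per-dump state for CTRL_CMD_GETPOLICY, stored directly inside cb->ctx
 * (ctrl_dumppolicy_start() checks with a BUILD_BUG_ON that it fits).
 */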
1515struct ctrl_dump_policy_ctx {
1516 struct netlink_policy_dump_state *state;
1517 const struct genl_family *rt;
1518 struct genl_op_iter *op_iter;
1519 u32 op;
1520 u16 fam_id;
1521 u8 dump_map:1,
1522 single_op:1;
1523};
1524
1525static const struct nla_policy ctrl_policy_policy[] = {
1526 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
1527 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
1528 .len = GENL_NAMSIZ - 1 },
1529 [CTRL_ATTR_OP] = { .type = NLA_U32 },
1530};
1531
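/*
 * CTRL_CMD_GETPOLICY: dump the netlink policies of one family, either for
 * a single command (CTRL_ATTR_OP) or for every op the family registered.
 * The start callback collects all relevant policies into a
 * netlink_policy_dump_state; the dump callback then emits the per-op
 * do/dump policy indices followed by the policies themselves.
 */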
1532static int ctrl_dumppolicy_start(struct netlink_callback *cb)
1533{
1534 const struct genl_dumpit_info *info = genl_dumpit_info(cb);
1535 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1536 struct nlattr **tb = info->info.attrs;
1537 const struct genl_family *rt;
1538 struct genl_op_iter i;
1539 int err;
1540
1541 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
1542
1543 if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
1544 return -EINVAL;
1545
1546 if (tb[CTRL_ATTR_FAMILY_ID]) {
1547 ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
1548 } else {
1549 rt = genl_family_find_byname(
1550 nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
1551 if (!rt)
1552 return -ENOENT;
1553 ctx->fam_id = rt->id;
1554 }
1555
1556 rt = genl_family_find_byid(ctx->fam_id);
1557 if (!rt)
1558 return -ENOENT;
1559
1560 ctx->rt = rt;
1561
1562 if (tb[CTRL_ATTR_OP]) {
1563 struct genl_split_ops doit, dump;
1564
1565 ctx->single_op = true;
1566 ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
1567
1568 err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
1569 if (err) {
1570 NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
1571 return err;
1572 }
1573
1574 if (doit.policy) {
1575 err = netlink_policy_dump_add_policy(&ctx->state,
1576 doit.policy,
1577 doit.maxattr);
1578 if (err)
1579 goto err_free_state;
1580 }
1581 if (dump.policy) {
1582 err = netlink_policy_dump_add_policy(&ctx->state,
1583 dump.policy,
1584 dump.maxattr);
1585 if (err)
1586 goto err_free_state;
1587 }
1588
1589 if (!ctx->state)
1590 return -ENODATA;
1591
1592 ctx->dump_map = 1;
1593 return 0;
1594 }
1595
1596 ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
1597 if (!ctx->op_iter)
1598 return -ENOMEM;
1599
1600 genl_op_iter_init(rt, ctx->op_iter);
1601 ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1602
1603 for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
1604 if (i.doit.policy) {
1605 err = netlink_policy_dump_add_policy(&ctx->state,
1606 i.doit.policy,
1607 i.doit.maxattr);
1608 if (err)
1609 goto err_free_state;
1610 }
1611 if (i.dumpit.policy) {
1612 err = netlink_policy_dump_add_policy(&ctx->state,
1613 i.dumpit.policy,
1614 i.dumpit.maxattr);
1615 if (err)
1616 goto err_free_state;
1617 }
1618 }
1619
1620 if (!ctx->state) {
1621 err = -ENODATA;
1622 goto err_free_op_iter;
1623 }
1624 return 0;
1625
1626err_free_state:
1627 netlink_policy_dump_free(ctx->state);
1628err_free_op_iter:
1629 kfree(ctx->op_iter);
1630 return err;
1631}
1632
1633static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
1634 struct netlink_callback *cb)
1635{
1636 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1637 void *hdr;
1638
1639 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1640 cb->nlh->nlmsg_seq, &genl_ctrl,
1641 NLM_F_MULTI, CTRL_CMD_GETPOLICY);
1642 if (!hdr)
1643 return NULL;
1644
1645 if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
1646 return NULL;
1647
1648 return hdr;
1649}
1650
1651static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
1652 struct netlink_callback *cb,
1653 struct genl_split_ops *doit,
1654 struct genl_split_ops *dumpit)
1655{
1656 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1657 struct nlattr *nest_pol, *nest_op;
1658 void *hdr;
1659 int idx;
1660
1661 /* skip if we have nothing to show */
1662 if (!doit->policy && !dumpit->policy)
1663 return 0;
1664
1665 hdr = ctrl_dumppolicy_prep(skb, cb);
1666 if (!hdr)
1667 return -ENOBUFS;
1668
1669 nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
1670 if (!nest_pol)
1671 goto err;
1672
1673 nest_op = nla_nest_start(skb, doit->cmd);
1674 if (!nest_op)
1675 goto err;
1676
1677 if (doit->policy) {
1678 idx = netlink_policy_dump_get_policy_idx(ctx->state,
1679 doit->policy,
1680 doit->maxattr);
1681
1682 if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
1683 goto err;
1684 }
1685 if (dumpit->policy) {
1686 idx = netlink_policy_dump_get_policy_idx(ctx->state,
1687 dumpit->policy,
1688 dumpit->maxattr);
1689
1690 if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
1691 goto err;
1692 }
1693
1694 nla_nest_end(skb, nest_op);
1695 nla_nest_end(skb, nest_pol);
1696 genlmsg_end(skb, hdr);
1697
1698 return 0;
1699err:
1700 genlmsg_cancel(skb, hdr);
1701 return -ENOBUFS;
1702}
1703
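/*
 * Dump pass for CTRL_CMD_GETPOLICY: first the CTRL_ATTR_OP_POLICY map (one
 * entry per op, or a single entry in the CTRL_ATTR_OP case), then the
 * policies via netlink_policy_dump_write().  Returning skb->len on a full
 * skb makes the netlink core call back with a fresh buffer.
 */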
1704static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
1705{
1706 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1707 void *hdr;
1708
1709 if (ctx->dump_map) {
1710 if (ctx->single_op) {
1711 struct genl_split_ops doit, dumpit;
1712
1713 if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
1714 &doit, &dumpit)))
1715 return -ENOENT;
1716
1717 if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
1718 return skb->len;
1719
1720 /* done with the per-op policy index list */
1721 ctx->dump_map = 0;
1722 }
1723
1724 while (ctx->dump_map) {
1725 if (ctrl_dumppolicy_put_op(skb, cb,
1726 &ctx->op_iter->doit,
1727 &ctx->op_iter->dumpit))
1728 return skb->len;
1729
1730 ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1731 }
1732 }
1733
1734 while (netlink_policy_dump_loop(ctx->state)) {
1735 struct nlattr *nest;
1736
1737 hdr = ctrl_dumppolicy_prep(skb, cb);
1738 if (!hdr)
1739 goto nla_put_failure;
1740
1741 nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
1742 if (!nest)
1743 goto nla_put_failure;
1744
1745 if (netlink_policy_dump_write(skb, ctx->state))
1746 goto nla_put_failure;
1747
1748 nla_nest_end(skb, nest);
1749
1750 genlmsg_end(skb, hdr);
1751 }
1752
1753 return skb->len;
1754
1755nla_put_failure:
1756 genlmsg_cancel(skb, hdr);
1757 return skb->len;
1758}
1759
1760static int ctrl_dumppolicy_done(struct netlink_callback *cb)
1761{
1762 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1763
1764 kfree(ctx->op_iter);
1765 netlink_policy_dump_free(ctx->state);
1766 return 0;
1767}
1768
1769static const struct genl_split_ops genl_ctrl_ops[] = {
1770 {
1771 .cmd = CTRL_CMD_GETFAMILY,
1772 .validate = GENL_DONT_VALIDATE_STRICT,
1773 .policy = ctrl_policy_family,
1774 .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
1775 .doit = ctrl_getfamily,
1776 .flags = GENL_CMD_CAP_DO,
1777 },
1778 {
1779 .cmd = CTRL_CMD_GETFAMILY,
1780 .validate = GENL_DONT_VALIDATE_DUMP,
1781 .policy = ctrl_policy_family,
1782 .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
1783 .dumpit = ctrl_dumpfamily,
1784 .flags = GENL_CMD_CAP_DUMP,
1785 },
1786 {
1787 .cmd = CTRL_CMD_GETPOLICY,
1788 .policy = ctrl_policy_policy,
1789 .maxattr = ARRAY_SIZE(ctrl_policy_policy) - 1,
1790 .start = ctrl_dumppolicy_start,
1791 .dumpit = ctrl_dumppolicy,
1792 .done = ctrl_dumppolicy_done,
1793 .flags = GENL_CMD_CAP_DUMP,
1794 },
1795};
1796
1797static const struct genl_multicast_group genl_ctrl_groups[] = {
1798 { .name = "notify", },
1799};
1800
1801static struct genl_family genl_ctrl __ro_after_init = {
1802 .module = THIS_MODULE,
1803 .split_ops = genl_ctrl_ops,
1804 .n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
1805 .resv_start_op = CTRL_CMD_GETPOLICY + 1,
1806 .mcgrps = genl_ctrl_groups,
1807 .n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
1808 .id = GENL_ID_CTRL,
1809 .name = "nlctrl",
1810 .version = 0x2,
1811 .netnsok = true,
1812};
1813
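/*
 * Called when a netlink socket joins a multicast group: map the global
 * group number back to the owning family and enforce the group's
 * capability requirements before the bind is allowed to succeed.
 */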
1814static int genl_bind(struct net *net, int group)
1815{
1816 const struct genl_family *family;
1817 unsigned int id;
1818 int ret = 0;
1819
1820 down_read(&cb_lock);
1821
1822 idr_for_each_entry(&genl_fam_idr, family, id) {
1823 const struct genl_multicast_group *grp;
1824 int i;
1825
1826 if (family->n_mcgrps == 0)
1827 continue;
1828
1829 i = group - family->mcgrp_offset;
1830 if (i < 0 || i >= family->n_mcgrps)
1831 continue;
1832
1833 grp = &family->mcgrps[i];
1834 if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
1835 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1836 ret = -EPERM;
1837 if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
1838 !ns_capable(net->user_ns, CAP_SYS_ADMIN))
1839 ret = -EPERM;
1840
1841 if (family->bind)
1842 family->bind(i);
1843
1844 break;
1845 }
1846
1847 up_read(&cb_lock);
1848 return ret;
1849}
1850
1851static void genl_unbind(struct net *net, int group)
1852{
1853 const struct genl_family *family;
1854 unsigned int id;
1855
1856 down_read(&cb_lock);
1857
1858 idr_for_each_entry(&genl_fam_idr, family, id) {
1859 int i;
1860
1861 if (family->n_mcgrps == 0)
1862 continue;
1863
1864 i = group - family->mcgrp_offset;
1865 if (i < 0 || i >= family->n_mcgrps)
1866 continue;
1867
1868 if (family->unbind)
1869 family->unbind(i);
1870
1871 break;
1872 }
1873
1874 up_read(&cb_lock);
1875}
1876
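/*
 * Per-netns setup: every network namespace gets its own NETLINK_GENERIC
 * kernel socket.  NL_CFG_F_NONROOT_RECV lets unprivileged processes listen
 * to multicast groups (subject to genl_bind() above); commands are still
 * policed individually via GENL_ADMIN_PERM and friends.
 */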
1877static int __net_init genl_pernet_init(struct net *net)
1878{
1879 struct netlink_kernel_cfg cfg = {
1880 .input = genl_rcv,
1881 .flags = NL_CFG_F_NONROOT_RECV,
1882 .bind = genl_bind,
1883 .unbind = genl_unbind,
1884 .release = genl_release,
1885 };
1886
1887 /* the socket's group count is grown later, as families register their multicast groups */
1888 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1889
1890 if (!net->genl_sock && net_eq(net, &init_net))
1891 panic("GENL: Cannot initialize generic netlink\n");
1892
1893 if (!net->genl_sock)
1894 return -ENOMEM;
1895
1896 return 0;
1897}
1898
1899static void __net_exit genl_pernet_exit(struct net *net)
1900{
1901 netlink_kernel_release(net->genl_sock);
1902 net->genl_sock = NULL;
1903}
1904
1905static struct pernet_operations genl_pernet_ops = {
1906 .init = genl_pernet_init,
1907 .exit = genl_pernet_exit,
1908};
1909
1910static int __init genl_init(void)
1911{
1912 int err;
1913
1914 err = genl_register_family(&genl_ctrl);
1915 if (err < 0)
1916 goto problem;
1917
1918 err = register_pernet_subsys(&genl_pernet_ops);
1919 if (err)
1920 goto problem;
1921
1922 return 0;
1923
1924problem:
1925 panic("GENL: Cannot register controller: %d\n", err);
1926}
1927
1928core_initcall(genl_init);
1929
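/*
 * Deliver one multicast message in every network namespace: the skb is
 * cloned for all but the last netns and the original is consumed by the
 * final send (or freed on error), matching normal netlink ownership rules.
 */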
1930static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1931 gfp_t flags)
1932{
1933 struct sk_buff *tmp;
1934 struct net *net, *prev = NULL;
1935 bool delivered = false;
1936 int err;
1937
1938 for_each_net_rcu(net) {
1939 if (prev) {
1940 tmp = skb_clone(skb, flags);
1941 if (!tmp) {
1942 err = -ENOMEM;
1943 goto error;
1944 }
1945 err = nlmsg_multicast(prev->genl_sock, tmp,
1946 portid, group, flags);
1947 if (!err)
1948 delivered = true;
1949 else if (err != -ESRCH)
1950 goto error;
1951 }
1952
1953 prev = net;
1954 }
1955
1956 err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1957 if (!err)
1958 delivered = true;
1959 else if (err != -ESRCH)
1960 return err;
1961 return delivered ? 0 : -ESRCH;
1962 error:
1963 kfree_skb(skb);
1964 return err;
1965}
1966
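/**
 * genlmsg_multicast_allns - deliver a multicast message in every netns
 * @family: the generic netlink family
 * @skb: the message to send; consumed by this call
 * @portid: own netlink portid to avoid sending to yourself
 * @group: offset of the multicast group within the family
 * @flags: allocation flags
 *
 * The family-relative group offset is translated to the global group id
 * before delivery.
 */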
1967int genlmsg_multicast_allns(const struct genl_family *family,
1968 struct sk_buff *skb, u32 portid,
1969 unsigned int group, gfp_t flags)
1970{
1971 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1972 return -EINVAL;
1973
1974 group = family->mcgrp_offset + group;
1975 return genlmsg_mcast(skb, portid, group, flags);
1976}
1977EXPORT_SYMBOL(genlmsg_multicast_allns);
1978
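/*
 * genl_notify - nlmsg_notify() wrapper for generic netlink: translates the
 * family-relative group offset and honours NLM_F_ECHO on the original
 * request (via nlmsg_report()) so the requester also gets a unicast copy.
 */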
1979void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1980 struct genl_info *info, u32 group, gfp_t flags)
1981{
1982 struct net *net = genl_info_net(info);
1983 struct sock *sk = net->genl_sock;
1984
1985 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1986 return;
1987
1988 group = family->mcgrp_offset + group;
1989 nlmsg_notify(sk, skb, info->snd_portid, group,
1990 nlmsg_report(info->nlhdr), flags);
1991}
1992EXPORT_SYMBOL(genl_notify);