Loading...
1/*
2 * NETLINK Generic Netlink Family
3 *
4 * Authors: Jamal Hadi Salim
5 * Thomas Graf <tgraf@suug.ch>
6 * Johannes Berg <johannes@sipsolutions.net>
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/errno.h>
13#include <linux/types.h>
14#include <linux/socket.h>
15#include <linux/string.h>
16#include <linux/skbuff.h>
17#include <linux/mutex.h>
18#include <linux/bitmap.h>
19#include <linux/rwsem.h>
20#include <linux/idr.h>
21#include <net/sock.h>
22#include <net/genetlink.h>
23
24static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
25static DECLARE_RWSEM(cb_lock);
26
27atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
28DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
29
30void genl_lock(void)
31{
32 mutex_lock(&genl_mutex);
33}
34EXPORT_SYMBOL(genl_lock);
35
36void genl_unlock(void)
37{
38 mutex_unlock(&genl_mutex);
39}
40EXPORT_SYMBOL(genl_unlock);
41
42#ifdef CONFIG_LOCKDEP
43bool lockdep_genl_is_held(void)
44{
45 return lockdep_is_held(&genl_mutex);
46}
47EXPORT_SYMBOL(lockdep_genl_is_held);
48#endif
49
50static void genl_lock_all(void)
51{
52 down_write(&cb_lock);
53 genl_lock();
54}
55
56static void genl_unlock_all(void)
57{
58 genl_unlock();
59 up_write(&cb_lock);
60}
61
62static DEFINE_IDR(genl_fam_idr);
63
64/*
65 * Bitmap of multicast groups that are currently in use.
66 *
67 * To avoid an allocation at boot of just one unsigned long,
68 * declare it global instead.
69 * Bit 0 is marked as already used since group 0 is invalid.
70 * Bit 1 is marked as already used since the drop-monitor code
71 * abuses the API and thinks it can statically use group 1.
72 * That group will typically conflict with other groups that
73 * any proper users use.
74 * Bit 16 is marked as used since it's used for generic netlink
75 * and the code no longer marks pre-reserved IDs as used.
76 * Bit 17 is marked as already used since the VFS quota code
77 * also abused this API and relied on family == group ID, we
78 * cater to that by giving it a static family and group ID.
79 * Bit 18 is marked as already used since the PMCRAID driver
80 * did the same thing as the VFS quota code (maybe copied?)
81 */
82static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
83 BIT(GENL_ID_VFS_DQUOT) |
84 BIT(GENL_ID_PMCRAID);
85static unsigned long *mc_groups = &mc_group_start;
86static unsigned long mc_groups_longs = 1;
87
88static int genl_ctrl_event(int event, const struct genl_family *family,
89 const struct genl_multicast_group *grp,
90 int grp_id);
91
92static const struct genl_family *genl_family_find_byid(unsigned int id)
93{
94 return idr_find(&genl_fam_idr, id);
95}
96
97static const struct genl_family *genl_family_find_byname(char *name)
98{
99 const struct genl_family *family;
100 unsigned int id;
101
102 idr_for_each_entry(&genl_fam_idr, family, id)
103 if (strcmp(family->name, name) == 0)
104 return family;
105
106 return NULL;
107}
108
109static const struct genl_ops *genl_get_cmd(u8 cmd,
110 const struct genl_family *family)
111{
112 int i;
113
114 for (i = 0; i < family->n_ops; i++)
115 if (family->ops[i].cmd == cmd)
116 return &family->ops[i];
117
118 return NULL;
119}
120
121static int genl_allocate_reserve_groups(int n_groups, int *first_id)
122{
123 unsigned long *new_groups;
124 int start = 0;
125 int i;
126 int id;
127 bool fits;
128
129 do {
130 if (start == 0)
131 id = find_first_zero_bit(mc_groups,
132 mc_groups_longs *
133 BITS_PER_LONG);
134 else
135 id = find_next_zero_bit(mc_groups,
136 mc_groups_longs * BITS_PER_LONG,
137 start);
138
139 fits = true;
140 for (i = id;
141 i < min_t(int, id + n_groups,
142 mc_groups_longs * BITS_PER_LONG);
143 i++) {
144 if (test_bit(i, mc_groups)) {
145 start = i;
146 fits = false;
147 break;
148 }
149 }
150
151 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
152 unsigned long new_longs = mc_groups_longs +
153 BITS_TO_LONGS(n_groups);
154 size_t nlen = new_longs * sizeof(unsigned long);
155
156 if (mc_groups == &mc_group_start) {
157 new_groups = kzalloc(nlen, GFP_KERNEL);
158 if (!new_groups)
159 return -ENOMEM;
160 mc_groups = new_groups;
161 *mc_groups = mc_group_start;
162 } else {
163 new_groups = krealloc(mc_groups, nlen,
164 GFP_KERNEL);
165 if (!new_groups)
166 return -ENOMEM;
167 mc_groups = new_groups;
168 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
169 mc_groups[mc_groups_longs + i] = 0;
170 }
171 mc_groups_longs = new_longs;
172 }
173 } while (!fits);
174
175 for (i = id; i < id + n_groups; i++)
176 set_bit(i, mc_groups);
177 *first_id = id;
178 return 0;
179}
180
181static struct genl_family genl_ctrl;
182
183static int genl_validate_assign_mc_groups(struct genl_family *family)
184{
185 int first_id;
186 int n_groups = family->n_mcgrps;
187 int err = 0, i;
188 bool groups_allocated = false;
189
190 if (!n_groups)
191 return 0;
192
193 for (i = 0; i < n_groups; i++) {
194 const struct genl_multicast_group *grp = &family->mcgrps[i];
195
196 if (WARN_ON(grp->name[0] == '\0'))
197 return -EINVAL;
198 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
199 return -EINVAL;
200 }
201
202 /* special-case our own group and hacks */
203 if (family == &genl_ctrl) {
204 first_id = GENL_ID_CTRL;
205 BUG_ON(n_groups != 1);
206 } else if (strcmp(family->name, "NET_DM") == 0) {
207 first_id = 1;
208 BUG_ON(n_groups != 1);
209 } else if (family->id == GENL_ID_VFS_DQUOT) {
210 first_id = GENL_ID_VFS_DQUOT;
211 BUG_ON(n_groups != 1);
212 } else if (family->id == GENL_ID_PMCRAID) {
213 first_id = GENL_ID_PMCRAID;
214 BUG_ON(n_groups != 1);
215 } else {
216 groups_allocated = true;
217 err = genl_allocate_reserve_groups(n_groups, &first_id);
218 if (err)
219 return err;
220 }
221
222 family->mcgrp_offset = first_id;
223
224 /* if still initializing, can't and don't need to to realloc bitmaps */
225 if (!init_net.genl_sock)
226 return 0;
227
228 if (family->netnsok) {
229 struct net *net;
230
231 netlink_table_grab();
232 rcu_read_lock();
233 for_each_net_rcu(net) {
234 err = __netlink_change_ngroups(net->genl_sock,
235 mc_groups_longs * BITS_PER_LONG);
236 if (err) {
237 /*
238 * No need to roll back, can only fail if
239 * memory allocation fails and then the
240 * number of _possible_ groups has been
241 * increased on some sockets which is ok.
242 */
243 break;
244 }
245 }
246 rcu_read_unlock();
247 netlink_table_ungrab();
248 } else {
249 err = netlink_change_ngroups(init_net.genl_sock,
250 mc_groups_longs * BITS_PER_LONG);
251 }
252
253 if (groups_allocated && err) {
254 for (i = 0; i < family->n_mcgrps; i++)
255 clear_bit(family->mcgrp_offset + i, mc_groups);
256 }
257
258 return err;
259}
260
261static void genl_unregister_mc_groups(const struct genl_family *family)
262{
263 struct net *net;
264 int i;
265
266 netlink_table_grab();
267 rcu_read_lock();
268 for_each_net_rcu(net) {
269 for (i = 0; i < family->n_mcgrps; i++)
270 __netlink_clear_multicast_users(
271 net->genl_sock, family->mcgrp_offset + i);
272 }
273 rcu_read_unlock();
274 netlink_table_ungrab();
275
276 for (i = 0; i < family->n_mcgrps; i++) {
277 int grp_id = family->mcgrp_offset + i;
278
279 if (grp_id != 1)
280 clear_bit(grp_id, mc_groups);
281 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
282 &family->mcgrps[i], grp_id);
283 }
284}
285
286static int genl_validate_ops(const struct genl_family *family)
287{
288 const struct genl_ops *ops = family->ops;
289 unsigned int n_ops = family->n_ops;
290 int i, j;
291
292 if (WARN_ON(n_ops && !ops))
293 return -EINVAL;
294
295 if (!n_ops)
296 return 0;
297
298 for (i = 0; i < n_ops; i++) {
299 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
300 return -EINVAL;
301 for (j = i + 1; j < n_ops; j++)
302 if (ops[i].cmd == ops[j].cmd)
303 return -EINVAL;
304 }
305
306 return 0;
307}
308
309/**
310 * genl_register_family - register a generic netlink family
311 * @family: generic netlink family
312 *
313 * Registers the specified family after validating it first. Only one
314 * family may be registered with the same family name or identifier.
315 *
316 * The family's ops, multicast groups and module pointer must already
317 * be assigned.
318 *
319 * Return 0 on success or a negative error code.
320 */
321int genl_register_family(struct genl_family *family)
322{
323 int err, i;
324 int start = GENL_START_ALLOC, end = GENL_MAX_ID;
325
326 err = genl_validate_ops(family);
327 if (err)
328 return err;
329
330 genl_lock_all();
331
332 if (genl_family_find_byname(family->name)) {
333 err = -EEXIST;
334 goto errout_locked;
335 }
336
337 /*
338 * Sadly, a few cases need to be special-cased
339 * due to them having previously abused the API
340 * and having used their family ID also as their
341 * multicast group ID, so we use reserved IDs
342 * for both to be sure we can do that mapping.
343 */
344 if (family == &genl_ctrl) {
345 /* and this needs to be special for initial family lookups */
346 start = end = GENL_ID_CTRL;
347 } else if (strcmp(family->name, "pmcraid") == 0) {
348 start = end = GENL_ID_PMCRAID;
349 } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
350 start = end = GENL_ID_VFS_DQUOT;
351 }
352
353 if (family->maxattr && !family->parallel_ops) {
354 family->attrbuf = kmalloc((family->maxattr+1) *
355 sizeof(struct nlattr *), GFP_KERNEL);
356 if (family->attrbuf == NULL) {
357 err = -ENOMEM;
358 goto errout_locked;
359 }
360 } else
361 family->attrbuf = NULL;
362
363 family->id = idr_alloc(&genl_fam_idr, family,
364 start, end + 1, GFP_KERNEL);
365 if (family->id < 0) {
366 err = family->id;
367 goto errout_locked;
368 }
369
370 err = genl_validate_assign_mc_groups(family);
371 if (err)
372 goto errout_remove;
373
374 genl_unlock_all();
375
376 /* send all events */
377 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
378 for (i = 0; i < family->n_mcgrps; i++)
379 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
380 &family->mcgrps[i], family->mcgrp_offset + i);
381
382 return 0;
383
384errout_remove:
385 idr_remove(&genl_fam_idr, family->id);
386 kfree(family->attrbuf);
387errout_locked:
388 genl_unlock_all();
389 return err;
390}
391EXPORT_SYMBOL(genl_register_family);
392
393/**
394 * genl_unregister_family - unregister generic netlink family
395 * @family: generic netlink family
396 *
397 * Unregisters the specified family.
398 *
399 * Returns 0 on success or a negative error code.
400 */
401int genl_unregister_family(const struct genl_family *family)
402{
403 genl_lock_all();
404
405 if (!genl_family_find_byid(family->id)) {
406 genl_unlock_all();
407 return -ENOENT;
408 }
409
410 genl_unregister_mc_groups(family);
411
412 idr_remove(&genl_fam_idr, family->id);
413
414 up_write(&cb_lock);
415 wait_event(genl_sk_destructing_waitq,
416 atomic_read(&genl_sk_destructing_cnt) == 0);
417 genl_unlock();
418
419 kfree(family->attrbuf);
420
421 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
422
423 return 0;
424}
425EXPORT_SYMBOL(genl_unregister_family);
426
427/**
428 * genlmsg_put - Add generic netlink header to netlink message
429 * @skb: socket buffer holding the message
430 * @portid: netlink portid the message is addressed to
431 * @seq: sequence number (usually the one of the sender)
432 * @family: generic netlink family
433 * @flags: netlink message flags
434 * @cmd: generic netlink command
435 *
436 * Returns pointer to user specific header
437 */
438void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
439 const struct genl_family *family, int flags, u8 cmd)
440{
441 struct nlmsghdr *nlh;
442 struct genlmsghdr *hdr;
443
444 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
445 family->hdrsize, flags);
446 if (nlh == NULL)
447 return NULL;
448
449 hdr = nlmsg_data(nlh);
450 hdr->cmd = cmd;
451 hdr->version = family->version;
452 hdr->reserved = 0;
453
454 return (char *) hdr + GENL_HDRLEN;
455}
456EXPORT_SYMBOL(genlmsg_put);
457
458static int genl_lock_start(struct netlink_callback *cb)
459{
460 /* our ops are always const - netlink API doesn't propagate that */
461 const struct genl_ops *ops = cb->data;
462 int rc = 0;
463
464 if (ops->start) {
465 genl_lock();
466 rc = ops->start(cb);
467 genl_unlock();
468 }
469 return rc;
470}
471
472static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
473{
474 /* our ops are always const - netlink API doesn't propagate that */
475 const struct genl_ops *ops = cb->data;
476 int rc;
477
478 genl_lock();
479 rc = ops->dumpit(skb, cb);
480 genl_unlock();
481 return rc;
482}
483
484static int genl_lock_done(struct netlink_callback *cb)
485{
486 /* our ops are always const - netlink API doesn't propagate that */
487 const struct genl_ops *ops = cb->data;
488 int rc = 0;
489
490 if (ops->done) {
491 genl_lock();
492 rc = ops->done(cb);
493 genl_unlock();
494 }
495 return rc;
496}
497
498static int genl_family_rcv_msg(const struct genl_family *family,
499 struct sk_buff *skb,
500 struct nlmsghdr *nlh)
501{
502 const struct genl_ops *ops;
503 struct net *net = sock_net(skb->sk);
504 struct genl_info info;
505 struct genlmsghdr *hdr = nlmsg_data(nlh);
506 struct nlattr **attrbuf;
507 int hdrlen, err;
508
509 /* this family doesn't exist in this netns */
510 if (!family->netnsok && !net_eq(net, &init_net))
511 return -ENOENT;
512
513 hdrlen = GENL_HDRLEN + family->hdrsize;
514 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
515 return -EINVAL;
516
517 ops = genl_get_cmd(hdr->cmd, family);
518 if (ops == NULL)
519 return -EOPNOTSUPP;
520
521 if ((ops->flags & GENL_ADMIN_PERM) &&
522 !netlink_capable(skb, CAP_NET_ADMIN))
523 return -EPERM;
524
525 if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
526 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
527 return -EPERM;
528
529 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
530 int rc;
531
532 if (ops->dumpit == NULL)
533 return -EOPNOTSUPP;
534
535 if (!family->parallel_ops) {
536 struct netlink_dump_control c = {
537 .module = family->module,
538 /* we have const, but the netlink API doesn't */
539 .data = (void *)ops,
540 .start = genl_lock_start,
541 .dump = genl_lock_dumpit,
542 .done = genl_lock_done,
543 };
544
545 genl_unlock();
546 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
547 genl_lock();
548
549 } else {
550 struct netlink_dump_control c = {
551 .module = family->module,
552 .start = ops->start,
553 .dump = ops->dumpit,
554 .done = ops->done,
555 };
556
557 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
558 }
559
560 return rc;
561 }
562
563 if (ops->doit == NULL)
564 return -EOPNOTSUPP;
565
566 if (family->maxattr && family->parallel_ops) {
567 attrbuf = kmalloc((family->maxattr+1) *
568 sizeof(struct nlattr *), GFP_KERNEL);
569 if (attrbuf == NULL)
570 return -ENOMEM;
571 } else
572 attrbuf = family->attrbuf;
573
574 if (attrbuf) {
575 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
576 ops->policy);
577 if (err < 0)
578 goto out;
579 }
580
581 info.snd_seq = nlh->nlmsg_seq;
582 info.snd_portid = NETLINK_CB(skb).portid;
583 info.nlhdr = nlh;
584 info.genlhdr = nlmsg_data(nlh);
585 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
586 info.attrs = attrbuf;
587 genl_info_net_set(&info, net);
588 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
589
590 if (family->pre_doit) {
591 err = family->pre_doit(ops, skb, &info);
592 if (err)
593 goto out;
594 }
595
596 err = ops->doit(skb, &info);
597
598 if (family->post_doit)
599 family->post_doit(ops, skb, &info);
600
601out:
602 if (family->parallel_ops)
603 kfree(attrbuf);
604
605 return err;
606}
607
608static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
609{
610 const struct genl_family *family;
611 int err;
612
613 family = genl_family_find_byid(nlh->nlmsg_type);
614 if (family == NULL)
615 return -ENOENT;
616
617 if (!family->parallel_ops)
618 genl_lock();
619
620 err = genl_family_rcv_msg(family, skb, nlh);
621
622 if (!family->parallel_ops)
623 genl_unlock();
624
625 return err;
626}
627
628static void genl_rcv(struct sk_buff *skb)
629{
630 down_read(&cb_lock);
631 netlink_rcv_skb(skb, &genl_rcv_msg);
632 up_read(&cb_lock);
633}
634
635/**************************************************************************
636 * Controller
637 **************************************************************************/
638
639static struct genl_family genl_ctrl;
640
641static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
642 u32 flags, struct sk_buff *skb, u8 cmd)
643{
644 void *hdr;
645
646 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
647 if (hdr == NULL)
648 return -1;
649
650 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
651 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
652 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
653 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
654 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
655 goto nla_put_failure;
656
657 if (family->n_ops) {
658 struct nlattr *nla_ops;
659 int i;
660
661 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
662 if (nla_ops == NULL)
663 goto nla_put_failure;
664
665 for (i = 0; i < family->n_ops; i++) {
666 struct nlattr *nest;
667 const struct genl_ops *ops = &family->ops[i];
668 u32 op_flags = ops->flags;
669
670 if (ops->dumpit)
671 op_flags |= GENL_CMD_CAP_DUMP;
672 if (ops->doit)
673 op_flags |= GENL_CMD_CAP_DO;
674 if (ops->policy)
675 op_flags |= GENL_CMD_CAP_HASPOL;
676
677 nest = nla_nest_start(skb, i + 1);
678 if (nest == NULL)
679 goto nla_put_failure;
680
681 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
682 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
683 goto nla_put_failure;
684
685 nla_nest_end(skb, nest);
686 }
687
688 nla_nest_end(skb, nla_ops);
689 }
690
691 if (family->n_mcgrps) {
692 struct nlattr *nla_grps;
693 int i;
694
695 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
696 if (nla_grps == NULL)
697 goto nla_put_failure;
698
699 for (i = 0; i < family->n_mcgrps; i++) {
700 struct nlattr *nest;
701 const struct genl_multicast_group *grp;
702
703 grp = &family->mcgrps[i];
704
705 nest = nla_nest_start(skb, i + 1);
706 if (nest == NULL)
707 goto nla_put_failure;
708
709 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
710 family->mcgrp_offset + i) ||
711 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
712 grp->name))
713 goto nla_put_failure;
714
715 nla_nest_end(skb, nest);
716 }
717 nla_nest_end(skb, nla_grps);
718 }
719
720 genlmsg_end(skb, hdr);
721 return 0;
722
723nla_put_failure:
724 genlmsg_cancel(skb, hdr);
725 return -EMSGSIZE;
726}
727
728static int ctrl_fill_mcgrp_info(const struct genl_family *family,
729 const struct genl_multicast_group *grp,
730 int grp_id, u32 portid, u32 seq, u32 flags,
731 struct sk_buff *skb, u8 cmd)
732{
733 void *hdr;
734 struct nlattr *nla_grps;
735 struct nlattr *nest;
736
737 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
738 if (hdr == NULL)
739 return -1;
740
741 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
742 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
743 goto nla_put_failure;
744
745 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
746 if (nla_grps == NULL)
747 goto nla_put_failure;
748
749 nest = nla_nest_start(skb, 1);
750 if (nest == NULL)
751 goto nla_put_failure;
752
753 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
754 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
755 grp->name))
756 goto nla_put_failure;
757
758 nla_nest_end(skb, nest);
759 nla_nest_end(skb, nla_grps);
760
761 genlmsg_end(skb, hdr);
762 return 0;
763
764nla_put_failure:
765 genlmsg_cancel(skb, hdr);
766 return -EMSGSIZE;
767}
768
769static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
770{
771 int n = 0;
772 struct genl_family *rt;
773 struct net *net = sock_net(skb->sk);
774 int fams_to_skip = cb->args[0];
775 unsigned int id;
776
777 idr_for_each_entry(&genl_fam_idr, rt, id) {
778 if (!rt->netnsok && !net_eq(net, &init_net))
779 continue;
780
781 if (n++ < fams_to_skip)
782 continue;
783
784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
785 cb->nlh->nlmsg_seq, NLM_F_MULTI,
786 skb, CTRL_CMD_NEWFAMILY) < 0) {
787 n--;
788 break;
789 }
790 }
791
792 cb->args[0] = n;
793 return skb->len;
794}
795
796static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
797 u32 portid, int seq, u8 cmd)
798{
799 struct sk_buff *skb;
800 int err;
801
802 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
803 if (skb == NULL)
804 return ERR_PTR(-ENOBUFS);
805
806 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
807 if (err < 0) {
808 nlmsg_free(skb);
809 return ERR_PTR(err);
810 }
811
812 return skb;
813}
814
815static struct sk_buff *
816ctrl_build_mcgrp_msg(const struct genl_family *family,
817 const struct genl_multicast_group *grp,
818 int grp_id, u32 portid, int seq, u8 cmd)
819{
820 struct sk_buff *skb;
821 int err;
822
823 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
824 if (skb == NULL)
825 return ERR_PTR(-ENOBUFS);
826
827 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
828 seq, 0, skb, cmd);
829 if (err < 0) {
830 nlmsg_free(skb);
831 return ERR_PTR(err);
832 }
833
834 return skb;
835}
836
837static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
838 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
839 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
840 .len = GENL_NAMSIZ - 1 },
841};
842
843static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
844{
845 struct sk_buff *msg;
846 const struct genl_family *res = NULL;
847 int err = -EINVAL;
848
849 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
850 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
851 res = genl_family_find_byid(id);
852 err = -ENOENT;
853 }
854
855 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
856 char *name;
857
858 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
859 res = genl_family_find_byname(name);
860#ifdef CONFIG_MODULES
861 if (res == NULL) {
862 genl_unlock();
863 up_read(&cb_lock);
864 request_module("net-pf-%d-proto-%d-family-%s",
865 PF_NETLINK, NETLINK_GENERIC, name);
866 down_read(&cb_lock);
867 genl_lock();
868 res = genl_family_find_byname(name);
869 }
870#endif
871 err = -ENOENT;
872 }
873
874 if (res == NULL)
875 return err;
876
877 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
878 /* family doesn't exist here */
879 return -ENOENT;
880 }
881
882 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
883 CTRL_CMD_NEWFAMILY);
884 if (IS_ERR(msg))
885 return PTR_ERR(msg);
886
887 return genlmsg_reply(msg, info);
888}
889
890static int genl_ctrl_event(int event, const struct genl_family *family,
891 const struct genl_multicast_group *grp,
892 int grp_id)
893{
894 struct sk_buff *msg;
895
896 /* genl is still initialising */
897 if (!init_net.genl_sock)
898 return 0;
899
900 switch (event) {
901 case CTRL_CMD_NEWFAMILY:
902 case CTRL_CMD_DELFAMILY:
903 WARN_ON(grp);
904 msg = ctrl_build_family_msg(family, 0, 0, event);
905 break;
906 case CTRL_CMD_NEWMCAST_GRP:
907 case CTRL_CMD_DELMCAST_GRP:
908 BUG_ON(!grp);
909 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
910 break;
911 default:
912 return -EINVAL;
913 }
914
915 if (IS_ERR(msg))
916 return PTR_ERR(msg);
917
918 if (!family->netnsok) {
919 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
920 0, GFP_KERNEL);
921 } else {
922 rcu_read_lock();
923 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
924 0, GFP_ATOMIC);
925 rcu_read_unlock();
926 }
927
928 return 0;
929}
930
931static const struct genl_ops genl_ctrl_ops[] = {
932 {
933 .cmd = CTRL_CMD_GETFAMILY,
934 .doit = ctrl_getfamily,
935 .dumpit = ctrl_dumpfamily,
936 .policy = ctrl_policy,
937 },
938};
939
940static const struct genl_multicast_group genl_ctrl_groups[] = {
941 { .name = "notify", },
942};
943
944static struct genl_family genl_ctrl __ro_after_init = {
945 .module = THIS_MODULE,
946 .ops = genl_ctrl_ops,
947 .n_ops = ARRAY_SIZE(genl_ctrl_ops),
948 .mcgrps = genl_ctrl_groups,
949 .n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
950 .id = GENL_ID_CTRL,
951 .name = "nlctrl",
952 .version = 0x2,
953 .maxattr = CTRL_ATTR_MAX,
954 .netnsok = true,
955};
956
957static int genl_bind(struct net *net, int group)
958{
959 struct genl_family *f;
960 int err = -ENOENT;
961 unsigned int id;
962
963 down_read(&cb_lock);
964
965 idr_for_each_entry(&genl_fam_idr, f, id) {
966 if (group >= f->mcgrp_offset &&
967 group < f->mcgrp_offset + f->n_mcgrps) {
968 int fam_grp = group - f->mcgrp_offset;
969
970 if (!f->netnsok && net != &init_net)
971 err = -ENOENT;
972 else if (f->mcast_bind)
973 err = f->mcast_bind(net, fam_grp);
974 else
975 err = 0;
976 break;
977 }
978 }
979 up_read(&cb_lock);
980
981 return err;
982}
983
984static void genl_unbind(struct net *net, int group)
985{
986 struct genl_family *f;
987 unsigned int id;
988
989 down_read(&cb_lock);
990
991 idr_for_each_entry(&genl_fam_idr, f, id) {
992 if (group >= f->mcgrp_offset &&
993 group < f->mcgrp_offset + f->n_mcgrps) {
994 int fam_grp = group - f->mcgrp_offset;
995
996 if (f->mcast_unbind)
997 f->mcast_unbind(net, fam_grp);
998 break;
999 }
1000 }
1001 up_read(&cb_lock);
1002}
1003
1004static int __net_init genl_pernet_init(struct net *net)
1005{
1006 struct netlink_kernel_cfg cfg = {
1007 .input = genl_rcv,
1008 .flags = NL_CFG_F_NONROOT_RECV,
1009 .bind = genl_bind,
1010 .unbind = genl_unbind,
1011 };
1012
1013 /* we'll bump the group number right afterwards */
1014 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1015
1016 if (!net->genl_sock && net_eq(net, &init_net))
1017 panic("GENL: Cannot initialize generic netlink\n");
1018
1019 if (!net->genl_sock)
1020 return -ENOMEM;
1021
1022 return 0;
1023}
1024
1025static void __net_exit genl_pernet_exit(struct net *net)
1026{
1027 netlink_kernel_release(net->genl_sock);
1028 net->genl_sock = NULL;
1029}
1030
1031static struct pernet_operations genl_pernet_ops = {
1032 .init = genl_pernet_init,
1033 .exit = genl_pernet_exit,
1034};
1035
1036static int __init genl_init(void)
1037{
1038 int err;
1039
1040 err = genl_register_family(&genl_ctrl);
1041 if (err < 0)
1042 goto problem;
1043
1044 err = register_pernet_subsys(&genl_pernet_ops);
1045 if (err)
1046 goto problem;
1047
1048 return 0;
1049
1050problem:
1051 panic("GENL: Cannot register controller: %d\n", err);
1052}
1053
1054subsys_initcall(genl_init);
1055
1056/**
1057 * genl_family_attrbuf - return family's attrbuf
1058 * @family: the family
1059 *
1060 * Return the family's attrbuf, while validating that it's
1061 * actually valid to access it.
1062 *
1063 * You cannot use this function with a family that has parallel_ops
1064 * and you can only use it within (pre/post) doit/dumpit callbacks.
1065 */
1066struct nlattr **genl_family_attrbuf(const struct genl_family *family)
1067{
1068 if (!WARN_ON(family->parallel_ops))
1069 lockdep_assert_held(&genl_mutex);
1070
1071 return family->attrbuf;
1072}
1073EXPORT_SYMBOL(genl_family_attrbuf);
1074
1075static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1076 gfp_t flags)
1077{
1078 struct sk_buff *tmp;
1079 struct net *net, *prev = NULL;
1080 int err;
1081
1082 for_each_net_rcu(net) {
1083 if (prev) {
1084 tmp = skb_clone(skb, flags);
1085 if (!tmp) {
1086 err = -ENOMEM;
1087 goto error;
1088 }
1089 err = nlmsg_multicast(prev->genl_sock, tmp,
1090 portid, group, flags);
1091 if (err)
1092 goto error;
1093 }
1094
1095 prev = net;
1096 }
1097
1098 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1099 error:
1100 kfree_skb(skb);
1101 return err;
1102}
1103
1104int genlmsg_multicast_allns(const struct genl_family *family,
1105 struct sk_buff *skb, u32 portid,
1106 unsigned int group, gfp_t flags)
1107{
1108 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1109 return -EINVAL;
1110 group = family->mcgrp_offset + group;
1111 return genlmsg_mcast(skb, portid, group, flags);
1112}
1113EXPORT_SYMBOL(genlmsg_multicast_allns);
1114
1115void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1116 struct genl_info *info, u32 group, gfp_t flags)
1117{
1118 struct net *net = genl_info_net(info);
1119 struct sock *sk = net->genl_sock;
1120 int report = 0;
1121
1122 if (info->nlhdr)
1123 report = nlmsg_report(info->nlhdr);
1124
1125 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1126 return;
1127 group = family->mcgrp_offset + group;
1128 nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1129}
1130EXPORT_SYMBOL(genl_notify);
1/*
2 * NETLINK Generic Netlink Family
3 *
4 * Authors: Jamal Hadi Salim
5 * Thomas Graf <tgraf@suug.ch>
6 * Johannes Berg <johannes@sipsolutions.net>
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/errno.h>
13#include <linux/types.h>
14#include <linux/socket.h>
15#include <linux/string.h>
16#include <linux/skbuff.h>
17#include <linux/mutex.h>
18#include <linux/bitmap.h>
19#include <linux/rwsem.h>
20#include <net/sock.h>
21#include <net/genetlink.h>
22
23static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
24static DECLARE_RWSEM(cb_lock);
25
26atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
27DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
28
29void genl_lock(void)
30{
31 mutex_lock(&genl_mutex);
32}
33EXPORT_SYMBOL(genl_lock);
34
35void genl_unlock(void)
36{
37 mutex_unlock(&genl_mutex);
38}
39EXPORT_SYMBOL(genl_unlock);
40
41#ifdef CONFIG_LOCKDEP
42bool lockdep_genl_is_held(void)
43{
44 return lockdep_is_held(&genl_mutex);
45}
46EXPORT_SYMBOL(lockdep_genl_is_held);
47#endif
48
49static void genl_lock_all(void)
50{
51 down_write(&cb_lock);
52 genl_lock();
53}
54
55static void genl_unlock_all(void)
56{
57 genl_unlock();
58 up_write(&cb_lock);
59}
60
61#define GENL_FAM_TAB_SIZE 16
62#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
63
64static struct list_head family_ht[GENL_FAM_TAB_SIZE];
65/*
66 * Bitmap of multicast groups that are currently in use.
67 *
68 * To avoid an allocation at boot of just one unsigned long,
69 * declare it global instead.
70 * Bit 0 is marked as already used since group 0 is invalid.
71 * Bit 1 is marked as already used since the drop-monitor code
72 * abuses the API and thinks it can statically use group 1.
73 * That group will typically conflict with other groups that
74 * any proper users use.
75 * Bit 16 is marked as used since it's used for generic netlink
76 * and the code no longer marks pre-reserved IDs as used.
77 * Bit 17 is marked as already used since the VFS quota code
78 * also abused this API and relied on family == group ID, we
79 * cater to that by giving it a static family and group ID.
80 * Bit 18 is marked as already used since the PMCRAID driver
81 * did the same thing as the VFS quota code (maybe copied?)
82 */
83static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
84 BIT(GENL_ID_VFS_DQUOT) |
85 BIT(GENL_ID_PMCRAID);
86static unsigned long *mc_groups = &mc_group_start;
87static unsigned long mc_groups_longs = 1;
88
89static int genl_ctrl_event(int event, struct genl_family *family,
90 const struct genl_multicast_group *grp,
91 int grp_id);
92
93static inline unsigned int genl_family_hash(unsigned int id)
94{
95 return id & GENL_FAM_TAB_MASK;
96}
97
98static inline struct list_head *genl_family_chain(unsigned int id)
99{
100 return &family_ht[genl_family_hash(id)];
101}
102
103static struct genl_family *genl_family_find_byid(unsigned int id)
104{
105 struct genl_family *f;
106
107 list_for_each_entry(f, genl_family_chain(id), family_list)
108 if (f->id == id)
109 return f;
110
111 return NULL;
112}
113
114static struct genl_family *genl_family_find_byname(char *name)
115{
116 struct genl_family *f;
117 int i;
118
119 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
120 list_for_each_entry(f, genl_family_chain(i), family_list)
121 if (strcmp(f->name, name) == 0)
122 return f;
123
124 return NULL;
125}
126
127static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
128{
129 int i;
130
131 for (i = 0; i < family->n_ops; i++)
132 if (family->ops[i].cmd == cmd)
133 return &family->ops[i];
134
135 return NULL;
136}
137
138/* Of course we are going to have problems once we hit
139 * 2^16 alive types, but that can only happen by year 2K
140*/
141static u16 genl_generate_id(void)
142{
143 static u16 id_gen_idx = GENL_MIN_ID;
144 int i;
145
146 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
147 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
148 id_gen_idx != GENL_ID_PMCRAID &&
149 !genl_family_find_byid(id_gen_idx))
150 return id_gen_idx;
151 if (++id_gen_idx > GENL_MAX_ID)
152 id_gen_idx = GENL_MIN_ID;
153 }
154
155 return 0;
156}
157
158static int genl_allocate_reserve_groups(int n_groups, int *first_id)
159{
160 unsigned long *new_groups;
161 int start = 0;
162 int i;
163 int id;
164 bool fits;
165
166 do {
167 if (start == 0)
168 id = find_first_zero_bit(mc_groups,
169 mc_groups_longs *
170 BITS_PER_LONG);
171 else
172 id = find_next_zero_bit(mc_groups,
173 mc_groups_longs * BITS_PER_LONG,
174 start);
175
176 fits = true;
177 for (i = id;
178 i < min_t(int, id + n_groups,
179 mc_groups_longs * BITS_PER_LONG);
180 i++) {
181 if (test_bit(i, mc_groups)) {
182 start = i;
183 fits = false;
184 break;
185 }
186 }
187
188 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
189 unsigned long new_longs = mc_groups_longs +
190 BITS_TO_LONGS(n_groups);
191 size_t nlen = new_longs * sizeof(unsigned long);
192
193 if (mc_groups == &mc_group_start) {
194 new_groups = kzalloc(nlen, GFP_KERNEL);
195 if (!new_groups)
196 return -ENOMEM;
197 mc_groups = new_groups;
198 *mc_groups = mc_group_start;
199 } else {
200 new_groups = krealloc(mc_groups, nlen,
201 GFP_KERNEL);
202 if (!new_groups)
203 return -ENOMEM;
204 mc_groups = new_groups;
205 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
206 mc_groups[mc_groups_longs + i] = 0;
207 }
208 mc_groups_longs = new_longs;
209 }
210 } while (!fits);
211
212 for (i = id; i < id + n_groups; i++)
213 set_bit(i, mc_groups);
214 *first_id = id;
215 return 0;
216}
217
218static struct genl_family genl_ctrl;
219
220static int genl_validate_assign_mc_groups(struct genl_family *family)
221{
222 int first_id;
223 int n_groups = family->n_mcgrps;
224 int err = 0, i;
225 bool groups_allocated = false;
226
227 if (!n_groups)
228 return 0;
229
230 for (i = 0; i < n_groups; i++) {
231 const struct genl_multicast_group *grp = &family->mcgrps[i];
232
233 if (WARN_ON(grp->name[0] == '\0'))
234 return -EINVAL;
235 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
236 return -EINVAL;
237 }
238
239 /* special-case our own group and hacks */
240 if (family == &genl_ctrl) {
241 first_id = GENL_ID_CTRL;
242 BUG_ON(n_groups != 1);
243 } else if (strcmp(family->name, "NET_DM") == 0) {
244 first_id = 1;
245 BUG_ON(n_groups != 1);
246 } else if (family->id == GENL_ID_VFS_DQUOT) {
247 first_id = GENL_ID_VFS_DQUOT;
248 BUG_ON(n_groups != 1);
249 } else if (family->id == GENL_ID_PMCRAID) {
250 first_id = GENL_ID_PMCRAID;
251 BUG_ON(n_groups != 1);
252 } else {
253 groups_allocated = true;
254 err = genl_allocate_reserve_groups(n_groups, &first_id);
255 if (err)
256 return err;
257 }
258
259 family->mcgrp_offset = first_id;
260
261 /* if still initializing, can't and don't need to to realloc bitmaps */
262 if (!init_net.genl_sock)
263 return 0;
264
265 if (family->netnsok) {
266 struct net *net;
267
268 netlink_table_grab();
269 rcu_read_lock();
270 for_each_net_rcu(net) {
271 err = __netlink_change_ngroups(net->genl_sock,
272 mc_groups_longs * BITS_PER_LONG);
273 if (err) {
274 /*
275 * No need to roll back, can only fail if
276 * memory allocation fails and then the
277 * number of _possible_ groups has been
278 * increased on some sockets which is ok.
279 */
280 break;
281 }
282 }
283 rcu_read_unlock();
284 netlink_table_ungrab();
285 } else {
286 err = netlink_change_ngroups(init_net.genl_sock,
287 mc_groups_longs * BITS_PER_LONG);
288 }
289
290 if (groups_allocated && err) {
291 for (i = 0; i < family->n_mcgrps; i++)
292 clear_bit(family->mcgrp_offset + i, mc_groups);
293 }
294
295 return err;
296}
297
298static void genl_unregister_mc_groups(struct genl_family *family)
299{
300 struct net *net;
301 int i;
302
303 netlink_table_grab();
304 rcu_read_lock();
305 for_each_net_rcu(net) {
306 for (i = 0; i < family->n_mcgrps; i++)
307 __netlink_clear_multicast_users(
308 net->genl_sock, family->mcgrp_offset + i);
309 }
310 rcu_read_unlock();
311 netlink_table_ungrab();
312
313 for (i = 0; i < family->n_mcgrps; i++) {
314 int grp_id = family->mcgrp_offset + i;
315
316 if (grp_id != 1)
317 clear_bit(grp_id, mc_groups);
318 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
319 &family->mcgrps[i], grp_id);
320 }
321}
322
323static int genl_validate_ops(const struct genl_family *family)
324{
325 const struct genl_ops *ops = family->ops;
326 unsigned int n_ops = family->n_ops;
327 int i, j;
328
329 if (WARN_ON(n_ops && !ops))
330 return -EINVAL;
331
332 if (!n_ops)
333 return 0;
334
335 for (i = 0; i < n_ops; i++) {
336 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
337 return -EINVAL;
338 for (j = i + 1; j < n_ops; j++)
339 if (ops[i].cmd == ops[j].cmd)
340 return -EINVAL;
341 }
342
343 return 0;
344}
345
346/**
347 * __genl_register_family - register a generic netlink family
348 * @family: generic netlink family
349 *
350 * Registers the specified family after validating it first. Only one
351 * family may be registered with the same family name or identifier.
352 * The family id may equal GENL_ID_GENERATE causing an unique id to
353 * be automatically generated and assigned.
354 *
355 * The family's ops array must already be assigned, you can use the
356 * genl_register_family_with_ops() helper function.
357 *
358 * Return 0 on success or a negative error code.
359 */
360int __genl_register_family(struct genl_family *family)
361{
362 int err = -EINVAL, i;
363
364 if (family->id && family->id < GENL_MIN_ID)
365 goto errout;
366
367 if (family->id > GENL_MAX_ID)
368 goto errout;
369
370 err = genl_validate_ops(family);
371 if (err)
372 return err;
373
374 genl_lock_all();
375
376 if (genl_family_find_byname(family->name)) {
377 err = -EEXIST;
378 goto errout_locked;
379 }
380
381 if (family->id == GENL_ID_GENERATE) {
382 u16 newid = genl_generate_id();
383
384 if (!newid) {
385 err = -ENOMEM;
386 goto errout_locked;
387 }
388
389 family->id = newid;
390 } else if (genl_family_find_byid(family->id)) {
391 err = -EEXIST;
392 goto errout_locked;
393 }
394
395 if (family->maxattr && !family->parallel_ops) {
396 family->attrbuf = kmalloc((family->maxattr+1) *
397 sizeof(struct nlattr *), GFP_KERNEL);
398 if (family->attrbuf == NULL) {
399 err = -ENOMEM;
400 goto errout_locked;
401 }
402 } else
403 family->attrbuf = NULL;
404
405 err = genl_validate_assign_mc_groups(family);
406 if (err)
407 goto errout_locked;
408
409 list_add_tail(&family->family_list, genl_family_chain(family->id));
410 genl_unlock_all();
411
412 /* send all events */
413 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
414 for (i = 0; i < family->n_mcgrps; i++)
415 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
416 &family->mcgrps[i], family->mcgrp_offset + i);
417
418 return 0;
419
420errout_locked:
421 genl_unlock_all();
422errout:
423 return err;
424}
425EXPORT_SYMBOL(__genl_register_family);
426
427/**
428 * genl_unregister_family - unregister generic netlink family
429 * @family: generic netlink family
430 *
431 * Unregisters the specified family.
432 *
433 * Returns 0 on success or a negative error code.
434 */
435int genl_unregister_family(struct genl_family *family)
436{
437 struct genl_family *rc;
438
439 genl_lock_all();
440
441 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
442 if (family->id != rc->id || strcmp(rc->name, family->name))
443 continue;
444
445 genl_unregister_mc_groups(family);
446
447 list_del(&rc->family_list);
448 family->n_ops = 0;
449 up_write(&cb_lock);
450 wait_event(genl_sk_destructing_waitq,
451 atomic_read(&genl_sk_destructing_cnt) == 0);
452 genl_unlock();
453
454 kfree(family->attrbuf);
455 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
456 return 0;
457 }
458
459 genl_unlock_all();
460
461 return -ENOENT;
462}
463EXPORT_SYMBOL(genl_unregister_family);
464
465/**
466 * genlmsg_put - Add generic netlink header to netlink message
467 * @skb: socket buffer holding the message
468 * @portid: netlink portid the message is addressed to
469 * @seq: sequence number (usually the one of the sender)
470 * @family: generic netlink family
471 * @flags: netlink message flags
472 * @cmd: generic netlink command
473 *
474 * Returns pointer to user specific header
475 */
476void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
477 struct genl_family *family, int flags, u8 cmd)
478{
479 struct nlmsghdr *nlh;
480 struct genlmsghdr *hdr;
481
482 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
483 family->hdrsize, flags);
484 if (nlh == NULL)
485 return NULL;
486
487 hdr = nlmsg_data(nlh);
488 hdr->cmd = cmd;
489 hdr->version = family->version;
490 hdr->reserved = 0;
491
492 return (char *) hdr + GENL_HDRLEN;
493}
494EXPORT_SYMBOL(genlmsg_put);
495
496static int genl_lock_start(struct netlink_callback *cb)
497{
498 /* our ops are always const - netlink API doesn't propagate that */
499 const struct genl_ops *ops = cb->data;
500 int rc = 0;
501
502 if (ops->start) {
503 genl_lock();
504 rc = ops->start(cb);
505 genl_unlock();
506 }
507 return rc;
508}
509
510static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
511{
512 /* our ops are always const - netlink API doesn't propagate that */
513 const struct genl_ops *ops = cb->data;
514 int rc;
515
516 genl_lock();
517 rc = ops->dumpit(skb, cb);
518 genl_unlock();
519 return rc;
520}
521
522static int genl_lock_done(struct netlink_callback *cb)
523{
524 /* our ops are always const - netlink API doesn't propagate that */
525 const struct genl_ops *ops = cb->data;
526 int rc = 0;
527
528 if (ops->done) {
529 genl_lock();
530 rc = ops->done(cb);
531 genl_unlock();
532 }
533 return rc;
534}
535
536static int genl_family_rcv_msg(struct genl_family *family,
537 struct sk_buff *skb,
538 struct nlmsghdr *nlh)
539{
540 const struct genl_ops *ops;
541 struct net *net = sock_net(skb->sk);
542 struct genl_info info;
543 struct genlmsghdr *hdr = nlmsg_data(nlh);
544 struct nlattr **attrbuf;
545 int hdrlen, err;
546
547 /* this family doesn't exist in this netns */
548 if (!family->netnsok && !net_eq(net, &init_net))
549 return -ENOENT;
550
551 hdrlen = GENL_HDRLEN + family->hdrsize;
552 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
553 return -EINVAL;
554
555 ops = genl_get_cmd(hdr->cmd, family);
556 if (ops == NULL)
557 return -EOPNOTSUPP;
558
559 if ((ops->flags & GENL_ADMIN_PERM) &&
560 !netlink_capable(skb, CAP_NET_ADMIN))
561 return -EPERM;
562
563 if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
564 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
565 return -EPERM;
566
567 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
568 int rc;
569
570 if (ops->dumpit == NULL)
571 return -EOPNOTSUPP;
572
573 if (!family->parallel_ops) {
574 struct netlink_dump_control c = {
575 .module = family->module,
576 /* we have const, but the netlink API doesn't */
577 .data = (void *)ops,
578 .start = genl_lock_start,
579 .dump = genl_lock_dumpit,
580 .done = genl_lock_done,
581 };
582
583 genl_unlock();
584 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
585 genl_lock();
586
587 } else {
588 struct netlink_dump_control c = {
589 .module = family->module,
590 .start = ops->start,
591 .dump = ops->dumpit,
592 .done = ops->done,
593 };
594
595 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
596 }
597
598 return rc;
599 }
600
601 if (ops->doit == NULL)
602 return -EOPNOTSUPP;
603
604 if (family->maxattr && family->parallel_ops) {
605 attrbuf = kmalloc((family->maxattr+1) *
606 sizeof(struct nlattr *), GFP_KERNEL);
607 if (attrbuf == NULL)
608 return -ENOMEM;
609 } else
610 attrbuf = family->attrbuf;
611
612 if (attrbuf) {
613 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
614 ops->policy);
615 if (err < 0)
616 goto out;
617 }
618
619 info.snd_seq = nlh->nlmsg_seq;
620 info.snd_portid = NETLINK_CB(skb).portid;
621 info.nlhdr = nlh;
622 info.genlhdr = nlmsg_data(nlh);
623 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
624 info.attrs = attrbuf;
625 genl_info_net_set(&info, net);
626 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
627
628 if (family->pre_doit) {
629 err = family->pre_doit(ops, skb, &info);
630 if (err)
631 goto out;
632 }
633
634 err = ops->doit(skb, &info);
635
636 if (family->post_doit)
637 family->post_doit(ops, skb, &info);
638
639out:
640 if (family->parallel_ops)
641 kfree(attrbuf);
642
643 return err;
644}
645
646static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
647{
648 struct genl_family *family;
649 int err;
650
651 family = genl_family_find_byid(nlh->nlmsg_type);
652 if (family == NULL)
653 return -ENOENT;
654
655 if (!family->parallel_ops)
656 genl_lock();
657
658 err = genl_family_rcv_msg(family, skb, nlh);
659
660 if (!family->parallel_ops)
661 genl_unlock();
662
663 return err;
664}
665
666static void genl_rcv(struct sk_buff *skb)
667{
668 down_read(&cb_lock);
669 netlink_rcv_skb(skb, &genl_rcv_msg);
670 up_read(&cb_lock);
671}
672
673/**************************************************************************
674 * Controller
675 **************************************************************************/
676
677static struct genl_family genl_ctrl = {
678 .id = GENL_ID_CTRL,
679 .name = "nlctrl",
680 .version = 0x2,
681 .maxattr = CTRL_ATTR_MAX,
682 .netnsok = true,
683};
684
685static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
686 u32 flags, struct sk_buff *skb, u8 cmd)
687{
688 void *hdr;
689
690 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
691 if (hdr == NULL)
692 return -1;
693
694 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
695 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
696 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
697 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
698 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
699 goto nla_put_failure;
700
701 if (family->n_ops) {
702 struct nlattr *nla_ops;
703 int i;
704
705 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
706 if (nla_ops == NULL)
707 goto nla_put_failure;
708
709 for (i = 0; i < family->n_ops; i++) {
710 struct nlattr *nest;
711 const struct genl_ops *ops = &family->ops[i];
712 u32 op_flags = ops->flags;
713
714 if (ops->dumpit)
715 op_flags |= GENL_CMD_CAP_DUMP;
716 if (ops->doit)
717 op_flags |= GENL_CMD_CAP_DO;
718 if (ops->policy)
719 op_flags |= GENL_CMD_CAP_HASPOL;
720
721 nest = nla_nest_start(skb, i + 1);
722 if (nest == NULL)
723 goto nla_put_failure;
724
725 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
726 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
727 goto nla_put_failure;
728
729 nla_nest_end(skb, nest);
730 }
731
732 nla_nest_end(skb, nla_ops);
733 }
734
735 if (family->n_mcgrps) {
736 struct nlattr *nla_grps;
737 int i;
738
739 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
740 if (nla_grps == NULL)
741 goto nla_put_failure;
742
743 for (i = 0; i < family->n_mcgrps; i++) {
744 struct nlattr *nest;
745 const struct genl_multicast_group *grp;
746
747 grp = &family->mcgrps[i];
748
749 nest = nla_nest_start(skb, i + 1);
750 if (nest == NULL)
751 goto nla_put_failure;
752
753 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
754 family->mcgrp_offset + i) ||
755 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
756 grp->name))
757 goto nla_put_failure;
758
759 nla_nest_end(skb, nest);
760 }
761 nla_nest_end(skb, nla_grps);
762 }
763
764 genlmsg_end(skb, hdr);
765 return 0;
766
767nla_put_failure:
768 genlmsg_cancel(skb, hdr);
769 return -EMSGSIZE;
770}
771
772static int ctrl_fill_mcgrp_info(struct genl_family *family,
773 const struct genl_multicast_group *grp,
774 int grp_id, u32 portid, u32 seq, u32 flags,
775 struct sk_buff *skb, u8 cmd)
776{
777 void *hdr;
778 struct nlattr *nla_grps;
779 struct nlattr *nest;
780
781 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
782 if (hdr == NULL)
783 return -1;
784
785 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
786 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
787 goto nla_put_failure;
788
789 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
790 if (nla_grps == NULL)
791 goto nla_put_failure;
792
793 nest = nla_nest_start(skb, 1);
794 if (nest == NULL)
795 goto nla_put_failure;
796
797 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
798 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
799 grp->name))
800 goto nla_put_failure;
801
802 nla_nest_end(skb, nest);
803 nla_nest_end(skb, nla_grps);
804
805 genlmsg_end(skb, hdr);
806 return 0;
807
808nla_put_failure:
809 genlmsg_cancel(skb, hdr);
810 return -EMSGSIZE;
811}
812
813static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
814{
815
816 int i, n = 0;
817 struct genl_family *rt;
818 struct net *net = sock_net(skb->sk);
819 int chains_to_skip = cb->args[0];
820 int fams_to_skip = cb->args[1];
821
822 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
823 n = 0;
824 list_for_each_entry(rt, genl_family_chain(i), family_list) {
825 if (!rt->netnsok && !net_eq(net, &init_net))
826 continue;
827 if (++n < fams_to_skip)
828 continue;
829 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
830 cb->nlh->nlmsg_seq, NLM_F_MULTI,
831 skb, CTRL_CMD_NEWFAMILY) < 0)
832 goto errout;
833 }
834
835 fams_to_skip = 0;
836 }
837
838errout:
839 cb->args[0] = i;
840 cb->args[1] = n;
841
842 return skb->len;
843}
844
845static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
846 u32 portid, int seq, u8 cmd)
847{
848 struct sk_buff *skb;
849 int err;
850
851 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
852 if (skb == NULL)
853 return ERR_PTR(-ENOBUFS);
854
855 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
856 if (err < 0) {
857 nlmsg_free(skb);
858 return ERR_PTR(err);
859 }
860
861 return skb;
862}
863
864static struct sk_buff *
865ctrl_build_mcgrp_msg(struct genl_family *family,
866 const struct genl_multicast_group *grp,
867 int grp_id, u32 portid, int seq, u8 cmd)
868{
869 struct sk_buff *skb;
870 int err;
871
872 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
873 if (skb == NULL)
874 return ERR_PTR(-ENOBUFS);
875
876 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
877 seq, 0, skb, cmd);
878 if (err < 0) {
879 nlmsg_free(skb);
880 return ERR_PTR(err);
881 }
882
883 return skb;
884}
885
886static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
887 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
888 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
889 .len = GENL_NAMSIZ - 1 },
890};
891
892static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
893{
894 struct sk_buff *msg;
895 struct genl_family *res = NULL;
896 int err = -EINVAL;
897
898 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
899 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
900 res = genl_family_find_byid(id);
901 err = -ENOENT;
902 }
903
904 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
905 char *name;
906
907 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
908 res = genl_family_find_byname(name);
909#ifdef CONFIG_MODULES
910 if (res == NULL) {
911 genl_unlock();
912 up_read(&cb_lock);
913 request_module("net-pf-%d-proto-%d-family-%s",
914 PF_NETLINK, NETLINK_GENERIC, name);
915 down_read(&cb_lock);
916 genl_lock();
917 res = genl_family_find_byname(name);
918 }
919#endif
920 err = -ENOENT;
921 }
922
923 if (res == NULL)
924 return err;
925
926 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
927 /* family doesn't exist here */
928 return -ENOENT;
929 }
930
931 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
932 CTRL_CMD_NEWFAMILY);
933 if (IS_ERR(msg))
934 return PTR_ERR(msg);
935
936 return genlmsg_reply(msg, info);
937}
938
939static int genl_ctrl_event(int event, struct genl_family *family,
940 const struct genl_multicast_group *grp,
941 int grp_id)
942{
943 struct sk_buff *msg;
944
945 /* genl is still initialising */
946 if (!init_net.genl_sock)
947 return 0;
948
949 switch (event) {
950 case CTRL_CMD_NEWFAMILY:
951 case CTRL_CMD_DELFAMILY:
952 WARN_ON(grp);
953 msg = ctrl_build_family_msg(family, 0, 0, event);
954 break;
955 case CTRL_CMD_NEWMCAST_GRP:
956 case CTRL_CMD_DELMCAST_GRP:
957 BUG_ON(!grp);
958 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
959 break;
960 default:
961 return -EINVAL;
962 }
963
964 if (IS_ERR(msg))
965 return PTR_ERR(msg);
966
967 if (!family->netnsok) {
968 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
969 0, GFP_KERNEL);
970 } else {
971 rcu_read_lock();
972 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
973 0, GFP_ATOMIC);
974 rcu_read_unlock();
975 }
976
977 return 0;
978}
979
980static struct genl_ops genl_ctrl_ops[] = {
981 {
982 .cmd = CTRL_CMD_GETFAMILY,
983 .doit = ctrl_getfamily,
984 .dumpit = ctrl_dumpfamily,
985 .policy = ctrl_policy,
986 },
987};
988
989static struct genl_multicast_group genl_ctrl_groups[] = {
990 { .name = "notify", },
991};
992
993static int genl_bind(struct net *net, int group)
994{
995 int i, err = -ENOENT;
996
997 down_read(&cb_lock);
998 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
999 struct genl_family *f;
1000
1001 list_for_each_entry(f, genl_family_chain(i), family_list) {
1002 if (group >= f->mcgrp_offset &&
1003 group < f->mcgrp_offset + f->n_mcgrps) {
1004 int fam_grp = group - f->mcgrp_offset;
1005
1006 if (!f->netnsok && net != &init_net)
1007 err = -ENOENT;
1008 else if (f->mcast_bind)
1009 err = f->mcast_bind(net, fam_grp);
1010 else
1011 err = 0;
1012 break;
1013 }
1014 }
1015 }
1016 up_read(&cb_lock);
1017
1018 return err;
1019}
1020
1021static void genl_unbind(struct net *net, int group)
1022{
1023 int i;
1024
1025 down_read(&cb_lock);
1026 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
1027 struct genl_family *f;
1028
1029 list_for_each_entry(f, genl_family_chain(i), family_list) {
1030 if (group >= f->mcgrp_offset &&
1031 group < f->mcgrp_offset + f->n_mcgrps) {
1032 int fam_grp = group - f->mcgrp_offset;
1033
1034 if (f->mcast_unbind)
1035 f->mcast_unbind(net, fam_grp);
1036 break;
1037 }
1038 }
1039 }
1040 up_read(&cb_lock);
1041}
1042
1043static int __net_init genl_pernet_init(struct net *net)
1044{
1045 struct netlink_kernel_cfg cfg = {
1046 .input = genl_rcv,
1047 .flags = NL_CFG_F_NONROOT_RECV,
1048 .bind = genl_bind,
1049 .unbind = genl_unbind,
1050 };
1051
1052 /* we'll bump the group number right afterwards */
1053 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1054
1055 if (!net->genl_sock && net_eq(net, &init_net))
1056 panic("GENL: Cannot initialize generic netlink\n");
1057
1058 if (!net->genl_sock)
1059 return -ENOMEM;
1060
1061 return 0;
1062}
1063
1064static void __net_exit genl_pernet_exit(struct net *net)
1065{
1066 netlink_kernel_release(net->genl_sock);
1067 net->genl_sock = NULL;
1068}
1069
1070static struct pernet_operations genl_pernet_ops = {
1071 .init = genl_pernet_init,
1072 .exit = genl_pernet_exit,
1073};
1074
1075static int __init genl_init(void)
1076{
1077 int i, err;
1078
1079 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1080 INIT_LIST_HEAD(&family_ht[i]);
1081
1082 err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1083 genl_ctrl_groups);
1084 if (err < 0)
1085 goto problem;
1086
1087 err = register_pernet_subsys(&genl_pernet_ops);
1088 if (err)
1089 goto problem;
1090
1091 return 0;
1092
1093problem:
1094 panic("GENL: Cannot register controller: %d\n", err);
1095}
1096
1097subsys_initcall(genl_init);
1098
1099static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1100 gfp_t flags)
1101{
1102 struct sk_buff *tmp;
1103 struct net *net, *prev = NULL;
1104 int err;
1105
1106 for_each_net_rcu(net) {
1107 if (prev) {
1108 tmp = skb_clone(skb, flags);
1109 if (!tmp) {
1110 err = -ENOMEM;
1111 goto error;
1112 }
1113 err = nlmsg_multicast(prev->genl_sock, tmp,
1114 portid, group, flags);
1115 if (err)
1116 goto error;
1117 }
1118
1119 prev = net;
1120 }
1121
1122 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1123 error:
1124 kfree_skb(skb);
1125 return err;
1126}
1127
1128int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1129 u32 portid, unsigned int group, gfp_t flags)
1130{
1131 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1132 return -EINVAL;
1133 group = family->mcgrp_offset + group;
1134 return genlmsg_mcast(skb, portid, group, flags);
1135}
1136EXPORT_SYMBOL(genlmsg_multicast_allns);
1137
1138void genl_notify(struct genl_family *family, struct sk_buff *skb,
1139 struct genl_info *info, u32 group, gfp_t flags)
1140{
1141 struct net *net = genl_info_net(info);
1142 struct sock *sk = net->genl_sock;
1143 int report = 0;
1144
1145 if (info->nlhdr)
1146 report = nlmsg_report(info->nlhdr);
1147
1148 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1149 return;
1150 group = family->mcgrp_offset + group;
1151 nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1152}
1153EXPORT_SYMBOL(genl_notify);