/*
 *	connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it fills in a locally unique sequence
 * number and a random acknowledge number.  The sequence number may be
 * copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting, but its acknowledgement number is not equal to
 * the acknowledgement number in the original message + 1, then it is
 * a new message.
 */
int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (!__group) {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	} else {
		group = __group;
	}

	if (!netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = NLMSG_SPACE(sizeof(*msg) + msg->len);

	skb = alloc_skb(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));

	data = NLMSG_DATA(nlh);

	memcpy(data, msg, sizeof(*data) + msg->len);

	NETLINK_CB(skb).dst_group = group;

	return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);

nlmsg_failure:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
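
/*
 * Illustrative sketch only (not part of this file): one way a kernel-side
 * user of the connector might broadcast a message.  "payload" and "seq"
 * are hypothetical caller-maintained data, and 0xabcd/0x1 stand in for the
 * cb_id the caller registered with cn_add_callback().  Passing group 0
 * lets cn_netlink_send() look the group up by msg->id.
 *
 *	char buf[sizeof(struct cn_msg) + 16];
 *	struct cn_msg *m = (struct cn_msg *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	m->id.idx = 0xabcd;
 *	m->id.val = 0x1;
 *	m->seq = seq++;
 *	m->len = 16;
 *	memcpy(m->data, payload, m->len);
 *	cn_netlink_send(m, 0, GFP_KERNEL);
 */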

/*
 * Callback helper - queues work and sets up the destructor for given data.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			atomic_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *__skb)
{
	struct nlmsghdr *nlh;
	int err;
	struct sk_buff *skb;

	skb = skb_get(__skb);

	if (skb->len >= NLMSG_SPACE(0)) {
		nlh = nlmsg_hdr(skb);

		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
			kfree_skb(skb);
			return;
		}

		err = cn_call_callback(skb);
		if (err < 0)
			kfree_skb(skb);
	}
}

/*
 * Callback add routine - adds callback with given ID and name.
 * If there is a registered callback with the same ID it will not be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);
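
/*
 * Illustrative sketch only (not part of this file): a typical user
 * registers a callback from its module init and removes it on exit.  The
 * cb_id values and the my_* names below are hypothetical.
 *
 *	static struct cb_id my_id = { .idx = 0xabcd, .val = 0x1 };
 *
 *	static void my_callback(struct cn_msg *msg,
 *				struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("got %u bytes, seq %u, ack %u\n",
 *			msg->len, msg->seq, msg->ack);
 *	}
 *
 *	err = cn_add_callback(&my_id, "my_cn_user", my_callback);
 *	...
 *	cn_del_callback(&my_id);
 */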

/*
 * Callback remove routine - removes callback
 * with given ID.
 * If there is no registered callback with given
 * ID nothing happens.
 *
 * May sleep while waiting for reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

static int cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}

static int cn_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cn_proc_show, NULL);
}

static const struct file_operations cn_file_ops = {
	.owner   = THIS_MODULE,
	.open    = cn_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release
};

static int __devinit cn_init(void)
{
	struct cn_dev *dev = &cdev;

	dev->input = cn_rx_skb;

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
					 CN_NETLINK_USERS + 0xf,
					 dev->input, NULL, THIS_MODULE);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);

	return 0;
}

static void __devexit cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	proc_net_remove(&init_net, "connector");

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends mult (multiple) cn_msg at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it fills in a locally unique sequence
 * number and a random acknowledge number.  The sequence number may be
 * copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting, but its acknowledgement number is not equal to
 * the acknowledgement number in the original message + 1, then it is
 * a new message.
 *
 * If msg->len != len, then additional cn_msg messages are expected to
 * follow the first msg in the same buffer.
 *
 * The message is sent to the portid if one is given, to the group if one
 * is given, or to both if both are given.  If both are zero, the group is
 * looked up from the callback registered for msg->id and the message is
 * broadcast there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
			 gfp_t gfp_mask, netlink_filter_fn filter,
			 void *filter_data)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast_filtered(dev->nls, skb, portid, group,
						  gfp_mask, filter,
						  (void *)filter_data);
	return netlink_unicast(dev->nls, skb, portid,
			       !gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
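
/*
 * Illustrative sketch only (not part of this file): a caller batching two
 * cn_msg packets into one netlink skb might lay them out back to back and
 * pass the combined length of everything after the first header as "len".
 * Here "buf" is assumed to already hold the two consecutive packets,
 * "my_group" is a hypothetical multicast group, and the filter is left
 * NULL so every listener in that group receives the batch.
 *
 *	struct cn_msg *first = (struct cn_msg *)buf;
 *	struct cn_msg *second = (struct cn_msg *)(first->data + first->len);
 *	u16 total = first->len + sizeof(*second) + second->len;
 *
 *	cn_netlink_send_mult(first, total, 0, my_group, GFP_KERNEL,
 *			     NULL, NULL);
 */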

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
		    gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,
				    NULL, NULL);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
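
/*
 * Illustrative note: for a single message whose payload length is already
 * in msg->len, the two calls below are equivalent ("msg" and "my_group"
 * are hypothetical caller values).
 *
 *	cn_netlink_send(msg, 0, my_group, GFP_KERNEL);
 *	cn_netlink_send_mult(msg, msg->len, 0, my_group, GFP_KERNEL,
 *			     NULL, NULL);
 */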

/*
 * Callback helper - queues work and sets up the destructor for given data.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			refcount_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Allow non-root access for NETLINK_CONNECTOR family having CN_IDX_PROC
 * multicast group.
 */
static int cn_bind(struct net *net, int group)
{
	unsigned long groups = (unsigned long) group;

	if (ns_capable(net->user_ns, CAP_NET_ADMIN))
		return 0;

	if (test_bit(CN_IDX_PROC - 1, &groups))
		return 0;

	return -EPERM;
}

static void cn_release(struct sock *sk, unsigned long *groups)
{
	if (groups && test_bit(CN_IDX_PROC - 1, groups)) {
		kfree(sk->sk_user_data);
		sk->sk_user_data = NULL;
	}
}
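
/*
 * Note: the kfree() above pairs with the proc connector (cn_proc.c), which
 * is expected to stash its per-socket filter state in sk->sk_user_data;
 * freeing it when a CN_IDX_PROC listener goes away avoids leaking that
 * allocation.
 */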

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	int len, err;

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE)
			return;

		err = cn_call_callback(skb_get(skb));
		if (err < 0)
			kfree_skb(skb);
	}
}

/*
 * Callback add routine - adds callback with given ID and name.
 * If there is a registered callback with the same ID it will not be added.
 *
 * May sleep.
 */
int cn_add_callback(const struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routine - removes callback
 * with given ID.
 * If there is no registered callback with given
 * ID nothing happens.
 *
 * May sleep while waiting for reference counter to become zero.
 */
void cn_del_callback(const struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups	= CN_NETLINK_USERS + 0xf,
		.input	= cn_rx_skb,
		.flags	= NL_CFG_F_NONROOT_RECV,
		.bind	= cn_bind,
		.release = cn_release,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);

	return 0;
}

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);