v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *	connector.c
  4 *
  5 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  6 * All rights reserved.
  7 */
  8
  9#include <linux/compiler.h>
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/list.h>
 13#include <linux/skbuff.h>
 14#include <net/netlink.h>
 15#include <linux/moduleparam.h>
 16#include <linux/connector.h>
 17#include <linux/slab.h>
 18#include <linux/mutex.h>
 19#include <linux/proc_fs.h>
 20#include <linux/spinlock.h>
 21
 22#include <net/sock.h>
 23
 24MODULE_LICENSE("GPL");
 25MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 26MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
 27MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);
 28
 29static struct cn_dev cdev;
 30
 31static int cn_already_initialized;
 32
 33/*
 34 * Sends mult (multiple) cn_msg at a time.
 35 *
 36 * msg->seq and msg->ack are used to determine message genealogy.
 37 * When someone sends a message, they put a locally unique sequence
 38 * number and a random acknowledge number in it.  The sequence number may
 39 * be copied into nlmsghdr->nlmsg_seq too.
 40 *
 41 * Sequence number is incremented with each message to be sent.
 42 *
 43 * If we expect a reply to our message then the sequence number in
 44 * the received message MUST be the same as in the original message, and
 45 * its acknowledge number MUST be the original acknowledge number + 1.
 46 *
 47 * If we receive a message and its sequence number is not equal to the
 48 * one we are expecting then it is a new message.
 49 *
 50 * If we receive a message and its sequence number is the same as the
 51 * one we are expecting, but its acknowledgement number is not equal to
 52 * the acknowledgement number in the original message + 1, then it is
 53 * a new message.
 54 *
 55 * If msg->len != len, then additional cn_msg messages are expected following
 56 * the first msg.
 57 *
 58 * The message is sent to the portid if given, to the group if given, or to
 59 * both; if both are zero, the group is looked up from msg->id and used.
 60 */
 61int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
 62			 gfp_t gfp_mask, netlink_filter_fn filter,
 63			 void *filter_data)
 64{
 65	struct cn_callback_entry *__cbq;
 66	unsigned int size;
 67	struct sk_buff *skb;
 68	struct nlmsghdr *nlh;
 69	struct cn_msg *data;
 70	struct cn_dev *dev = &cdev;
 71	u32 group = 0;
 72	int found = 0;
 73
 74	if (portid || __group) {
 75		group = __group;
 76	} else {
 77		spin_lock_bh(&dev->cbdev->queue_lock);
 78		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
 79				    callback_entry) {
 80			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 81				found = 1;
 82				group = __cbq->group;
 83				break;
 84			}
 85		}
 86		spin_unlock_bh(&dev->cbdev->queue_lock);
 87
 88		if (!found)
 89			return -ENODEV;
 90	}
 91
 92	if (!portid && !netlink_has_listeners(dev->nls, group))
 93		return -ESRCH;
 94
 95	size = sizeof(*msg) + len;
 96
 97	skb = nlmsg_new(size, gfp_mask);
 98	if (!skb)
 99		return -ENOMEM;
100
101	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
102	if (!nlh) {
103		kfree_skb(skb);
104		return -EMSGSIZE;
105	}
106
107	data = nlmsg_data(nlh);
108
109	memcpy(data, msg, size);
110
111	NETLINK_CB(skb).dst_group = group;
112
113	if (group)
114		return netlink_broadcast_filtered(dev->nls, skb, portid, group,
115						  gfp_mask, filter,
116						  (void *)filter_data);
117	return netlink_unicast(dev->nls, skb, portid,
118			!gfpflags_allow_blocking(gfp_mask));
119}
120EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
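
A minimal kernel-side sketch (editor's illustration, not part of connector.c) of a caller following the rules above: it fills a cn_msg with a hypothetical cb_id, a locally unique seq and a random ack, and leaves portid and group zero so the group is looked up from msg->id. Only the cn_msg layout and the cn_netlink_send_mult() signature come from this file; every other name is made up.

#include <linux/connector.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical identifiers; a real user reserves its own idx/val pair. */
#define EXAMPLE_CN_IDX	0x7f
#define EXAMPLE_CN_VAL	0x1

static u32 example_seq;	/* not serialized; illustrative only */

static int example_send(const void *payload, u16 plen)
{
	struct cn_msg *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + plen, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->id.idx = EXAMPLE_CN_IDX;
	msg->id.val = EXAMPLE_CN_VAL;
	msg->seq = example_seq++;	/* locally unique sequence number */
	msg->ack = get_random_u32();	/* random acknowledge number */
	msg->len = plen;
	memcpy(msg->data, payload, plen);

	/*
	 * portid == 0 and group == 0: the group is looked up from msg->id,
	 * so a callback with this ID must already be registered.
	 */
	ret = cn_netlink_send_mult(msg, msg->len, 0, 0, GFP_KERNEL, NULL, NULL);
	kfree(msg);
	return ret;
}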
121
122/* same as cn_netlink_send_mult except msg->len is used for len */
123int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
124	gfp_t gfp_mask)
125{
126	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,
127				    NULL, NULL);
128}
129EXPORT_SYMBOL_GPL(cn_netlink_send);
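
The seq/ack pairing rule described in the header comment, written out as an illustrative helper that does not exist in the kernel:

#include <linux/connector.h>
#include <linux/types.h>

/* A reply carries the request's seq unchanged and the request's ack + 1. */
static bool example_is_reply_to(const struct cn_msg *req, const struct cn_msg *rsp)
{
	return rsp->seq == req->seq && rsp->ack == req->ack + 1;
}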
130
131/*
132 * Callback helper - finds the callback registered for msg->id and invokes it.
133 */
134static int cn_call_callback(struct sk_buff *skb)
135{
136	struct nlmsghdr *nlh;
137	struct cn_callback_entry *i, *cbq = NULL;
138	struct cn_dev *dev = &cdev;
139	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
140	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
141	int err = -ENODEV;
142
143	/* verify msg->len is within skb */
144	nlh = nlmsg_hdr(skb);
145	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
146		return -EINVAL;
147
148	spin_lock_bh(&dev->cbdev->queue_lock);
149	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
150		if (cn_cb_equal(&i->id.id, &msg->id)) {
151			refcount_inc(&i->refcnt);
152			cbq = i;
153			break;
154		}
155	}
156	spin_unlock_bh(&dev->cbdev->queue_lock);
157
158	if (cbq != NULL) {
159		cbq->callback(msg, nsp);
160		kfree_skb(skb);
161		cn_queue_release_callback(cbq);
162		err = 0;
163	}
164
165	return err;
166}
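
For reference, a sketch of the kind of handler that cbq->callback(msg, nsp) ends up invoking. The function name is hypothetical; the signature matches what cn_add_callback() below expects, and the permission check on the sender's netlink_skb_parms mirrors what in-tree connector users commonly do.

#include <linux/capability.h>
#include <linux/connector.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/user_namespace.h>

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* nsp carries the sender's portid and credentials. */
	if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN))
		return;

	pr_info("connector example: seq %u ack %u, %u byte payload from port %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);
}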
167
168/*
169 * Allow non-root access to the NETLINK_CONNECTOR family for the
170 * CN_IDX_PROC multicast group.
171 */
172static int cn_bind(struct net *net, int group)
173{
174	unsigned long groups = (unsigned long) group;
175
176	if (ns_capable(net->user_ns, CAP_NET_ADMIN))
177		return 0;
178
179	if (test_bit(CN_IDX_PROC - 1, &groups))
180		return 0;
181
182	return -EPERM;
183}
184
185static void cn_release(struct sock *sk, unsigned long *groups)
186{
187	if (groups && test_bit(CN_IDX_PROC - 1, groups)) {
188		kfree(sk->sk_user_data);
189		sk->sk_user_data = NULL;
190	}
191}
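
A userspace sketch of the case cn_bind() exists for: an unprivileged process joining the CN_IDX_PROC multicast group on a NETLINK_CONNECTOR socket. In sockaddr_nl, multicast group N is bit (N - 1) of nl_groups; CN_IDX_PROC is group 1, hence bit 0. Error handling is minimal and illustrative.

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/connector.h>
#include <linux/netlink.h>

static int example_open_proc_connector(void)
{
	struct sockaddr_nl addr;
	int fd;

	fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (fd < 0)
		return -errno;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1 << (CN_IDX_PROC - 1);	/* group 1 -> bit 0 */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		int err = -errno;

		close(fd);
		return err;
	}

	return fd;
}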
192
193/*
194 * Main netlink receiving function.
195 *
196 * It checks the skb, netlink header and msg sizes, then calls the callback helper.
197 */
198static void cn_rx_skb(struct sk_buff *skb)
199{
200	struct nlmsghdr *nlh;
201	int len, err;
202
203	if (skb->len >= NLMSG_HDRLEN) {
204		nlh = nlmsg_hdr(skb);
205		len = nlmsg_len(nlh);
206
207		if (len < (int)sizeof(struct cn_msg) ||
208		    skb->len < nlh->nlmsg_len ||
209		    len > CONNECTOR_MAX_MSG_SIZE)
210			return;
211
212		err = cn_call_callback(skb_get(skb));
213		if (err < 0)
214			kfree_skb(skb);
215	}
216}
217
218/*
219 * Callback add routine - adds a callback with the given ID and name.
220 * If a callback with the same ID is already registered, the new one is not added.
221 *
222 * May sleep.
223 */
224int cn_add_callback(const struct cb_id *id, const char *name,
225		    void (*callback)(struct cn_msg *,
226				     struct netlink_skb_parms *))
227{
228	struct cn_dev *dev = &cdev;
229
230	if (!cn_already_initialized)
231		return -EAGAIN;
232
233	return cn_queue_add_callback(dev->cbdev, name, id, callback);
234}
235EXPORT_SYMBOL_GPL(cn_add_callback);
236
237/*
238 * Callback remove routine - removes the callback
239 * with the given ID.
240 * If there is no registered callback with the given
241 * ID, nothing happens.
242 *
243 * May sleep while waiting for reference counter to become zero.
244 */
245void cn_del_callback(const struct cb_id *id)
246{
247	struct cn_dev *dev = &cdev;
248
249	cn_queue_del_callback(dev->cbdev, id);
250}
251EXPORT_SYMBOL_GPL(cn_del_callback);
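
A sketch of how a kernel module might pair these two calls. The ID values, names and callback body are hypothetical; only cn_add_callback()/cn_del_callback() and their signatures come from this file.

#include <linux/connector.h>
#include <linux/module.h>

/* Hypothetical ID; real users reserve an idx/val pair in connector.h. */
static const struct cb_id example_id = { .idx = 0x7f, .val = 0x1 };

static void example_cb(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* consume msg->data / msg->len here */
}

static int __init example_init(void)
{
	/* Returns -EAGAIN if the connector core has not been initialized yet. */
	return cn_add_callback(&example_id, "example", example_cb);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");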
252
253static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
254{
255	struct cn_queue_dev *dev = cdev.cbdev;
256	struct cn_callback_entry *cbq;
257
258	seq_printf(m, "Name            ID\n");
259
260	spin_lock_bh(&dev->queue_lock);
261
262	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
263		seq_printf(m, "%-15s %u:%u\n",
264			   cbq->id.name,
265			   cbq->id.id.idx,
266			   cbq->id.id.val);
267	}
268
269	spin_unlock_bh(&dev->queue_lock);
270
271	return 0;
272}
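
With the in-tree proc connector registered, the /proc/net/connector listing produced by cn_proc_show() would look roughly like this (illustrative; the names and IDs depend on which callbacks are loaded):

Name            ID
cn_proc         1:1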
273
274static int cn_init(void)
275{
276	struct cn_dev *dev = &cdev;
277	struct netlink_kernel_cfg cfg = {
278		.groups	= CN_NETLINK_USERS + 0xf,
279		.input	= cn_rx_skb,
280		.flags  = NL_CFG_F_NONROOT_RECV,
281		.bind   = cn_bind,
282		.release = cn_release,
283	};
284
285	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
286	if (!dev->nls)
287		return -EIO;
288
289	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
290	if (!dev->cbdev) {
291		netlink_kernel_release(dev->nls);
292		return -EINVAL;
293	}
294
295	cn_already_initialized = 1;
296
297	proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);
298
299	return 0;
300}
301
302static void cn_fini(void)
303{
304	struct cn_dev *dev = &cdev;
305
306	cn_already_initialized = 0;
307
308	remove_proc_entry("connector", init_net.proc_net);
309
310	cn_queue_free_dev(dev->cbdev);
311	netlink_kernel_release(dev->nls);
312}
313
314subsys_initcall(cn_init);
315module_exit(cn_fini);
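
Finally, a userspace sketch of the read side that consumes what cn_netlink_send() emits: a single NLMSG_DONE netlink message wrapping one cn_msg plus payload. The buffer layout and error handling are illustrative.

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/connector.h>
#include <linux/netlink.h>

/* Read one datagram from an already-bound NETLINK_CONNECTOR socket. */
static int example_recv_one(int fd)
{
	struct {
		struct nlmsghdr nlh;
		unsigned char data[CONNECTOR_MAX_MSG_SIZE];
	} buf;
	struct cn_msg *msg;
	ssize_t n;

	n = recv(fd, &buf, sizeof(buf), 0);
	if (n <= 0)
		return -1;

	if (!NLMSG_OK(&buf.nlh, (unsigned int)n) || buf.nlh.nlmsg_type != NLMSG_DONE)
		return -1;

	msg = NLMSG_DATA(&buf.nlh);
	printf("connector %u:%u seq %u ack %u len %u\n",
	       msg->id.idx, msg->id.val, msg->seq, msg->ack, (unsigned int)msg->len);
	return 0;
}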
v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *	connector.c
  4 *
  5 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  6 * All rights reserved.
  7 */
  8
  9#include <linux/compiler.h>
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/list.h>
 13#include <linux/skbuff.h>
 14#include <net/netlink.h>
 15#include <linux/moduleparam.h>
 16#include <linux/connector.h>
 17#include <linux/slab.h>
 18#include <linux/mutex.h>
 19#include <linux/proc_fs.h>
 20#include <linux/spinlock.h>
 21
 22#include <net/sock.h>
 23
 24MODULE_LICENSE("GPL");
 25MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 26MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
 27MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);
 28
 29static struct cn_dev cdev;
 30
 31static int cn_already_initialized;
 32
 33/*
 34 * Sends mult (multiple) cn_msg at a time.
 35 *
 36 * msg->seq and msg->ack are used to determine message genealogy.
 37 * When someone sends a message, they put a locally unique sequence
 38 * number and a random acknowledge number in it.  The sequence number may
 39 * be copied into nlmsghdr->nlmsg_seq too.
 40 *
 41 * Sequence number is incremented with each message to be sent.
 42 *
 43 * If we expect a reply to our message then the sequence number in
 44 * the received message MUST be the same as in the original message, and
 45 * its acknowledge number MUST be the original acknowledge number + 1.
 46 *
 47 * If we receive a message and its sequence number is not equal to the
 48 * one we are expecting then it is a new message.
 49 *
 50 * If we receive a message and its sequence number is the same as the
 51 * one we are expecting, but its acknowledgement number is not equal to
 52 * the acknowledgement number in the original message + 1, then it is
 53 * a new message.
 54 *
 55 * If msg->len != len, then additional cn_msg messages are expected following
 56 * the first msg.
 57 *
 58 * The message is sent to the portid if given, to the group if given, or to
 59 * both; if both are zero, the group is looked up from msg->id and used.
 60 */
 61int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
 62	gfp_t gfp_mask)
 63{
 64	struct cn_callback_entry *__cbq;
 65	unsigned int size;
 66	struct sk_buff *skb;
 67	struct nlmsghdr *nlh;
 68	struct cn_msg *data;
 69	struct cn_dev *dev = &cdev;
 70	u32 group = 0;
 71	int found = 0;
 72
 73	if (portid || __group) {
 74		group = __group;
 75	} else {
 76		spin_lock_bh(&dev->cbdev->queue_lock);
 77		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
 78				    callback_entry) {
 79			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 80				found = 1;
 81				group = __cbq->group;
 82				break;
 83			}
 84		}
 85		spin_unlock_bh(&dev->cbdev->queue_lock);
 86
 87		if (!found)
 88			return -ENODEV;
 89	}
 90
 91	if (!portid && !netlink_has_listeners(dev->nls, group))
 92		return -ESRCH;
 93
 94	size = sizeof(*msg) + len;
 95
 96	skb = nlmsg_new(size, gfp_mask);
 97	if (!skb)
 98		return -ENOMEM;
 99
100	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
101	if (!nlh) {
102		kfree_skb(skb);
103		return -EMSGSIZE;
104	}
105
106	data = nlmsg_data(nlh);
107
108	memcpy(data, msg, size);
109
110	NETLINK_CB(skb).dst_group = group;
111
112	if (group)
113		return netlink_broadcast(dev->nls, skb, portid, group,
114					 gfp_mask);
115	return netlink_unicast(dev->nls, skb, portid,
116			!gfpflags_allow_blocking(gfp_mask));
117}
118EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
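
In this version cn_netlink_send_mult() takes no filter arguments, so a caller passes only the message, length, portid, group and allocation flags. Below is an illustrative reply sent from inside a registered callback, tying in the seq/ack rule from the header comment; the function name and payload are hypothetical.

#include <linux/connector.h>
#include <linux/netlink.h>

static void example_reply(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct {
		struct cn_msg msg;
		u8 data[4];
	} rsp = {};

	rsp.msg.id = req->id;
	rsp.msg.seq = req->seq;		/* same sequence number ... */
	rsp.msg.ack = req->ack + 1;	/* ... and ack + 1 marks it as a reply */
	rsp.msg.len = sizeof(rsp.data);

	/* unicast straight back to the sender's portid (v5.9 signature) */
	cn_netlink_send_mult(&rsp.msg, rsp.msg.len, nsp->portid, 0, GFP_KERNEL);
}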
119
120/* same as cn_netlink_send_mult except msg->len is used for len */
121int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
122	gfp_t gfp_mask)
123{
124	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
125}
126EXPORT_SYMBOL_GPL(cn_netlink_send);
127
128/*
129 * Callback helper - finds the callback registered for msg->id and invokes it.
130 */
131static int cn_call_callback(struct sk_buff *skb)
132{
133	struct nlmsghdr *nlh;
134	struct cn_callback_entry *i, *cbq = NULL;
135	struct cn_dev *dev = &cdev;
136	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
137	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
138	int err = -ENODEV;
139
140	/* verify msg->len is within skb */
141	nlh = nlmsg_hdr(skb);
142	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
143		return -EINVAL;
144
145	spin_lock_bh(&dev->cbdev->queue_lock);
146	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
147		if (cn_cb_equal(&i->id.id, &msg->id)) {
148			refcount_inc(&i->refcnt);
149			cbq = i;
150			break;
151		}
152	}
153	spin_unlock_bh(&dev->cbdev->queue_lock);
154
155	if (cbq != NULL) {
156		cbq->callback(msg, nsp);
157		kfree_skb(skb);
158		cn_queue_release_callback(cbq);
159		err = 0;
160	}
161
162	return err;
163}
164
165/*
166 * Main netlink receiving function.
167 *
168 * It checks the skb, netlink header and msg sizes, then calls the callback helper.
169 */
170static void cn_rx_skb(struct sk_buff *skb)
171{
172	struct nlmsghdr *nlh;
173	int len, err;
174
175	if (skb->len >= NLMSG_HDRLEN) {
176		nlh = nlmsg_hdr(skb);
177		len = nlmsg_len(nlh);
178
179		if (len < (int)sizeof(struct cn_msg) ||
180		    skb->len < nlh->nlmsg_len ||
181		    len > CONNECTOR_MAX_MSG_SIZE)
182			return;
183
184		err = cn_call_callback(skb_get(skb));
185		if (err < 0)
186			kfree_skb(skb);
187	}
188}
189
190/*
191 * Callback add routine - adds a callback with the given ID and name.
192 * If a callback with the same ID is already registered, the new one is not added.
193 *
194 * May sleep.
195 */
196int cn_add_callback(struct cb_id *id, const char *name,
197		    void (*callback)(struct cn_msg *,
198				     struct netlink_skb_parms *))
199{
200	int err;
201	struct cn_dev *dev = &cdev;
202
203	if (!cn_already_initialized)
204		return -EAGAIN;
205
206	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
207	if (err)
208		return err;
209
210	return 0;
211}
212EXPORT_SYMBOL_GPL(cn_add_callback);
213
214/*
215 * Callback remove routine - removes the callback
216 * with the given ID.
217 * If there is no registered callback with the given
218 * ID, nothing happens.
219 *
220 * May sleep while waiting for reference counter to become zero.
221 */
222void cn_del_callback(struct cb_id *id)
223{
224	struct cn_dev *dev = &cdev;
225
226	cn_queue_del_callback(dev->cbdev, id);
227}
228EXPORT_SYMBOL_GPL(cn_del_callback);
229
230static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
231{
232	struct cn_queue_dev *dev = cdev.cbdev;
233	struct cn_callback_entry *cbq;
234
235	seq_printf(m, "Name            ID\n");
236
237	spin_lock_bh(&dev->queue_lock);
238
239	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
240		seq_printf(m, "%-15s %u:%u\n",
241			   cbq->id.name,
242			   cbq->id.id.idx,
243			   cbq->id.id.val);
244	}
245
246	spin_unlock_bh(&dev->queue_lock);
247
248	return 0;
249}
250
251static int cn_init(void)
252{
253	struct cn_dev *dev = &cdev;
254	struct netlink_kernel_cfg cfg = {
255		.groups	= CN_NETLINK_USERS + 0xf,
256		.input	= cn_rx_skb,
257	};
258
259	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
260	if (!dev->nls)
261		return -EIO;
262
263	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
264	if (!dev->cbdev) {
265		netlink_kernel_release(dev->nls);
266		return -EINVAL;
267	}
268
269	cn_already_initialized = 1;
270
271	proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);
272
273	return 0;
274}
275
276static void cn_fini(void)
277{
278	struct cn_dev *dev = &cdev;
279
280	cn_already_initialized = 0;
281
282	remove_proc_entry("connector", init_net.proc_net);
283
284	cn_queue_free_dev(dev->cbdev);
285	netlink_kernel_release(dev->nls);
286}
287
288subsys_initcall(cn_init);
289module_exit(cn_fini);