// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022, STMicroelectronics
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_char.h"
#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX	(MINORMASK + 1)

static dev_t rpmsg_major;

static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev: endpoint device
 * @cdev: cdev for the endpoint device
 * @rpdev: underlying rpmsg device
 * @chinfo: info used to open the endpoint
 * @ept_lock: synchronization of @ept modifications
 * @ept: rpmsg endpoint reference, when open
 * @queue_lock: synchronization of @queue operations
 * @queue: incoming message queue
 * @readq: wait object for incoming queue
 * @default_ept: set to the channel default endpoint if the default endpoint should be re-used
 *               on device open, to prevent endpoint address updates.
 * @remote_flow_restricted: true if the remote has requested that the flow be limited
 * @remote_flow_updated: true if a flow control request has been received and not yet consumed
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;
	struct rpmsg_endpoint *default_ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;

	bool remote_flow_restricted;
	bool remote_flow_updated;
};

int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	mutex_lock(&eptdev->ept_lock);
	eptdev->rpdev = NULL;
	if (eptdev->ept) {
		/* The default endpoint is released by the rpmsg core */
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	cdev_device_del(&eptdev->cdev, &eptdev->dev);
	put_device(&eptdev->dev);

	return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);

static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
			void *priv, u32 addr)
{
	struct rpmsg_eptdev *eptdev = priv;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, buf, len);

	spin_lock(&eptdev->queue_lock);
	skb_queue_tail(&eptdev->queue, skb);
	spin_unlock(&eptdev->queue_lock);

	/* wake up any blocking processes, waiting for new data */
	wake_up_interruptible(&eptdev->readq);

	return 0;
}

static int rpmsg_ept_flow_cb(struct rpmsg_device *rpdev, void *priv, bool enable)
{
	struct rpmsg_eptdev *eptdev = priv;

	eptdev->remote_flow_restricted = enable;
	eptdev->remote_flow_updated = true;

	wake_up_interruptible(&eptdev->readq);

	return 0;
}
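
/*
 * The flow callback pairs with rpmsg_set_flow_control(): when the remote
 * processor signals a flow-control request, the callback above latches the
 * state and wakes pollers, which then observe EPOLLPRI and fetch the value
 * with RPMSG_GET_OUTGOING_FLOWCONTROL (see rpmsg_eptdev_ioctl() below).
 * Sketch of how a peer endpoint would trigger it, assuming a valid endpoint
 * and destination address:
 *
 *	rpmsg_set_flow_control(ept, true, dst);
 */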

static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct rpmsg_endpoint *ept;
	struct rpmsg_device *rpdev = eptdev->rpdev;
	struct device *dev = &eptdev->dev;

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		mutex_unlock(&eptdev->ept_lock);
		return -EBUSY;
	}

	if (!eptdev->rpdev) {
		mutex_unlock(&eptdev->ept_lock);
		return -ENETRESET;
	}

	get_device(dev);

	/*
	 * If default_ept is set, the rpmsg device default endpoint is used.
	 * Otherwise a new endpoint is created on open and destroyed on release.
	 */
	if (eptdev->default_ept)
		ept = eptdev->default_ept;
	else
		ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);

	if (!ept) {
		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
		put_device(dev);
		mutex_unlock(&eptdev->ept_lock);
		return -EINVAL;
	}

	ept->flow_cb = rpmsg_ept_flow_cb;
	eptdev->ept = ept;
	filp->private_data = eptdev;
	mutex_unlock(&eptdev->ept_lock);

	return 0;
}

static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct device *dev = &eptdev->dev;

	/* Close the endpoint, if it's not already destroyed by the parent */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);
	eptdev->remote_flow_updated = false;

	/* Discard all SKBs */
	skb_queue_purge(&eptdev->queue);

	put_device(dev);

	return 0;
}

static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}

static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK) {
		ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
		if (ret == -ENOMEM)
			ret = -EAGAIN;
	} else {
		ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
	}

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}

static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
	struct rpmsg_eptdev *eptdev = filp->private_data;
	__poll_t mask = 0;

	if (!eptdev->ept)
		return EPOLLERR;

	poll_wait(filp, &eptdev->readq, wait);

	if (!skb_queue_empty(&eptdev->queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (eptdev->remote_flow_updated)
		mask |= EPOLLPRI;

	mutex_lock(&eptdev->ept_lock);
	mask |= rpmsg_poll(eptdev->ept, filp, wait);
	mutex_unlock(&eptdev->ept_lock);

	return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
			       unsigned long arg)
{
	struct rpmsg_eptdev *eptdev = fp->private_data;
	bool set;
	int ret;

	switch (cmd) {
	case RPMSG_GET_OUTGOING_FLOWCONTROL:
		eptdev->remote_flow_updated = false;
		ret = put_user(eptdev->remote_flow_restricted, (int __user *)arg);
		break;
	case RPMSG_SET_INCOMING_FLOWCONTROL:
		if (arg > 1) {
			ret = -EINVAL;
			break;
		}
		set = !!arg;
		ret = rpmsg_set_flow_control(eptdev->ept, set, eptdev->chinfo.dst);
		break;
	case RPMSG_DESTROY_EPT_IOCTL:
		/* Don't allow destroying a default endpoint. */
		if (eptdev->default_ept) {
			ret = -EINVAL;
			break;
		}
		ret = rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
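
/*
 * Illustrative userspace sketch (not part of this driver): how the two
 * flow-control ioctls above are expected to be used.  The "/dev/rpmsg0"
 * path is an assumption; the actual node name depends on the dynamically
 * allocated device id.
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	// Ask the remote side to pause transmissions towards us.
 *	ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 1);
 *
 *	// Wait for a flow-control notification from the remote, then read
 *	// (and thereby clear) the latest restriction state.
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
 *		int restricted;
 *		ioctl(fd, RPMSG_GET_OUTGOING_FLOWCONTROL, &restricted);
 *	}
 */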

static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
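
/*
 * Illustrative userspace sketch (not part of this driver): basic message
 * exchange through the character device backed by the fops above.  The
 * "/dev/rpmsg0" path is an assumption; nodes are named "rpmsg%d" with a
 * dynamically allocated id.
 *
 *	char buf[256];
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *
 *	write(fd, "ping", 4);                // rpmsg_eptdev_write_iter()
 *	int n = read(fd, buf, sizeof(buf));  // blocks until a message arrives
 *
 * With O_NONBLOCK, read() fails with EAGAIN while the receive queue is
 * empty, and write() fails with EAGAIN when no transmit buffer is available.
 */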

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);

static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_simple_remove(&rpmsg_ept_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	kfree(eptdev);
}

static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
						      struct device *parent)
{
	struct rpmsg_eptdev *eptdev;
	struct device *dev;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return ERR_PTR(-ENOMEM);

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = rpmsg_class;
	dev->parent = parent;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	return eptdev;
}

static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
	struct device *dev = &eptdev->dev;
	int ret;

	eptdev->chinfo = chinfo;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	return ret;

free_ept_ida:
	ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}

int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
			       struct rpmsg_channel_info chinfo)
{
	struct rpmsg_eptdev *eptdev;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
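
/*
 * Illustrative in-kernel sketch (hypothetical caller): another rpmsg driver
 * can expose an endpoint as a character device by filling a
 * rpmsg_channel_info and calling the exported helper above.  The endpoint
 * name "my-endpoint" is an assumption for the example.
 *
 *	struct rpmsg_channel_info chinfo = {
 *		.src = RPMSG_ADDR_ANY,
 *		.dst = RPMSG_ADDR_ANY,
 *	};
 *
 *	strscpy(chinfo.name, "my-endpoint", sizeof(chinfo.name));
 *	ret = rpmsg_chrdev_eptdev_create(rpdev, &rpdev->dev, chinfo);
 */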

static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo;
	struct rpmsg_eptdev *eptdev;
	struct device *dev = &rpdev->dev;

	memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
	chinfo.src = rpdev->src;
	chinfo.dst = rpdev->dst;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	/* Set the default_ept to the rpmsg device endpoint */
	eptdev->default_ept = rpdev->ept;

	/*
	 * The rpmsg_ept_cb uses the priv parameter to get its rpmsg_eptdev
	 * context. Store it in the default_ept priv field.
	 */
	eptdev->default_ept->priv = eptdev;

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}

static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
	int ret;

	ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
	if (ret)
		dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
}

static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
	{ .name = "rpmsg-raw" },
	{ },
};

static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.callback = rpmsg_ept_cb,
	.id_table = rpmsg_chrdev_id_table,
	.drv.name = "rpmsg_chrdev",
};

static int rpmsg_chrdev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		return ret;
	}

	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
	if (ret < 0) {
		pr_err("failed to register rpmsg raw driver\n");
		goto free_region;
	}

	return 0;

free_region:
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);

	return ret;
}
postcore_initcall(rpmsg_chrdev_init);

static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");