// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022, STMicroelectronics
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_char.h"
#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX (MINORMASK + 1)

static dev_t rpmsg_major;

static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev: endpoint device
 * @cdev: cdev for the endpoint device
 * @rpdev: underlying rpmsg device
 * @chinfo: info used to open the endpoint
 * @ept_lock: synchronization of @ept modifications
 * @ept: rpmsg endpoint reference, when open
 * @default_ept: set to the channel default endpoint if that endpoint should be
 *		 reused on device open, to prevent the endpoint address from changing
 * @queue_lock: synchronization of @queue operations
 * @queue: incoming message queue
 * @readq: wait object for incoming queue
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;
	struct rpmsg_endpoint *default_ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;
};

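/**
 * rpmsg_chrdev_eptdev_destroy() - tear down an endpoint char device
 * @dev: endpoint device to destroy
 * @data: unused; present so the function matches the device_for_each_child()
 *	  callback signature used by rpmsg_chrdev_remove()
 *
 * Releases the endpoint (unless it is the channel default endpoint, which is
 * owned and released by the rpmsg core), wakes any blocked readers and
 * removes the char device. Returns 0.
 */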
int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		/* The default endpoint is released by the rpmsg core */
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	cdev_device_del(&eptdev->cdev, &eptdev->dev);
	put_device(&eptdev->dev);

	return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);

static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
			void *priv, u32 addr)
{
	struct rpmsg_eptdev *eptdev = priv;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, buf, len);

	spin_lock(&eptdev->queue_lock);
	skb_queue_tail(&eptdev->queue, skb);
	spin_unlock(&eptdev->queue_lock);

	/* wake up any blocked processes waiting for new data */
	wake_up_interruptible(&eptdev->readq);

	return 0;
}

static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct rpmsg_endpoint *ept;
	struct rpmsg_device *rpdev = eptdev->rpdev;
	struct device *dev = &eptdev->dev;

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		mutex_unlock(&eptdev->ept_lock);
		return -EBUSY;
	}

	get_device(dev);

	/*
	 * If default_ept is set, reuse the rpmsg device's default endpoint.
	 * Otherwise create a new endpoint on open; it is destroyed on release.
	 */
	if (eptdev->default_ept)
		ept = eptdev->default_ept;
	else
		ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);

	if (!ept) {
		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
		put_device(dev);
		mutex_unlock(&eptdev->ept_lock);
		return -EINVAL;
	}

	eptdev->ept = ept;
	filp->private_data = eptdev;
	mutex_unlock(&eptdev->ept_lock);

	return 0;
}

static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct device *dev = &eptdev->dev;

	/* Close the endpoint, if it's not already destroyed by the parent */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* Discard all SKBs */
	skb_queue_purge(&eptdev->queue);

	put_device(dev);

	return 0;
}

static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}

static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK) {
		ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
		if (ret == -ENOMEM)
			ret = -EAGAIN;
	} else {
		ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
	}

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}

static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
	struct rpmsg_eptdev *eptdev = filp->private_data;
	__poll_t mask = 0;

	if (!eptdev->ept)
		return EPOLLERR;

	poll_wait(filp, &eptdev->readq, wait);

	if (!skb_queue_empty(&eptdev->queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	mask |= rpmsg_poll(eptdev->ept, filp, wait);

	return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
			       unsigned long arg)
{
	struct rpmsg_eptdev *eptdev = fp->private_data;

	if (cmd != RPMSG_DESTROY_EPT_IOCTL)
		return -EINVAL;

	/* Don't allow destroying a default endpoint. */
	if (eptdev->default_ept)
		return -EINVAL;

	return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
}

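/*
 * Userspace reaches an endpoint through the character device registered with
 * the file operations below. As a rough sketch (the /dev/rpmsg0 node name is
 * an assumption based on the "rpmsg%d" device naming and typical udev rules):
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *	write(fd, msg, msg_len);		-> rpmsg_eptdev_write_iter()
 *	read(fd, buf, sizeof(buf));		-> rpmsg_eptdev_read_iter()
 *	ioctl(fd, RPMSG_DESTROY_EPT_IOCTL);	-> rpmsg_eptdev_ioctl()
 *	close(fd);				-> rpmsg_eptdev_release()
 *
 * read() blocks until a message is queued unless O_NONBLOCK is set, and the
 * destroy ioctl is rejected with -EINVAL on a default endpoint.
 */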
static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);

static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_simple_remove(&rpmsg_ept_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	kfree(eptdev);
}

static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
						      struct device *parent)
{
	struct rpmsg_eptdev *eptdev;
	struct device *dev;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return ERR_PTR(-ENOMEM);

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = rpmsg_class;
	dev->parent = parent;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	return eptdev;
}

static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
	struct device *dev = &eptdev->dev;
	int ret;

	eptdev->chinfo = chinfo;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	return ret;

free_ept_ida:
	ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}

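/**
 * rpmsg_chrdev_eptdev_create() - register a char device for an rpmsg endpoint
 * @rpdev: rpmsg device on which the endpoint will be created
 * @parent: parent device for the new endpoint device
 * @chinfo: channel info (name, source and destination addresses) used to open
 *	    the endpoint
 *
 * Allocates an rpmsg_eptdev and registers its cdev; the rpmsg endpoint itself
 * is only created when the resulting device node is opened. Exported so that
 * other rpmsg drivers can expose endpoints as char devices.
 *
 * Return: 0 on success, or a negative errno on failure.
 */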
int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
			       struct rpmsg_channel_info chinfo)
{
	struct rpmsg_eptdev *eptdev;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);

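/*
 * Probe runs when a channel matching the "rpmsg-raw" entry in the id table is
 * announced. The resulting char device reuses the channel's default endpoint
 * instead of creating a new one on open, so the endpoint address stays fixed.
 * Since rpmsg_ept_cb() is also set as the driver callback below and the
 * default endpoint's priv is pointed at the eptdev here, messages arriving on
 * that endpoint are queued through the same path.
 */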
static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo;
	struct rpmsg_eptdev *eptdev;
	struct device *dev = &rpdev->dev;

	memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
	chinfo.src = rpdev->src;
	chinfo.dst = rpdev->dst;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	/* Set the default_ept to the rpmsg device endpoint */
	eptdev->default_ept = rpdev->ept;

	/*
	 * rpmsg_ept_cb() uses the priv parameter to get its rpmsg_eptdev
	 * context. Store it in the default_ept priv field.
	 */
	eptdev->default_ept->priv = eptdev;

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}

static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
	int ret;

	ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
	if (ret)
		dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
}

static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
	{ .name = "rpmsg-raw" },
	{ },
};

static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.callback = rpmsg_ept_cb,
	.id_table = rpmsg_chrdev_id_table,
	.drv.name = "rpmsg_chrdev",
};

static int rpmsg_chrdev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		return ret;
	}

	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
	if (ret < 0) {
		pr_err("failed to register rpmsg raw driver\n");
		goto free_region;
	}

	return 0;

free_region:
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);

	return ret;
}
postcore_initcall(rpmsg_chrdev_init);

static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");