// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

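/*
 * Duplicate @from's dma_range_map into @to, so that any DMA address
 * translation set up for the parent remoteproc device (e.g. via dma-ranges)
 * also applies to the child vdev device.  The bus_dma_region array is
 * terminated by an entry with size 0, which is what the walk below counts.
 */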
static int copy_dma_range_map(struct device *to, struct device *from)
{
	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
	int num_ranges = 0;

	if (!map)
		return 0;

	for (r = map; r->size; r++)
		num_ranges++;

	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
			  GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;
	to->dma_range_map = new_map;
	return 0;
}

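/*
 * The virtio device's parent is the rproc-virtio platform device created by
 * the remoteproc core; its driver data points back at the struct rproc_vdev,
 * which in turn holds the owning struct rproc.
 */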
static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	struct platform_device *pdev;

	pdev = container_of(vdev->dev.parent, struct platform_device, dev);

	return platform_get_drvdata(pdev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
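
/*
 * Example (illustrative sketch only, not part of this driver): a platform
 * rproc driver typically forwards the notification it receives from the
 * remote core, e.g. in a mailbox callback.  struct my_rproc, its mbox_client
 * member and my_rproc_mbox_cb are hypothetical names.
 *
 *	static void my_rproc_mbox_cb(struct mbox_client *client, void *data)
 *	{
 *		struct my_rproc *priv = container_of(client, struct my_rproc,
 *						     mbox_client);
 *		u32 notifyid = (u32)(uintptr_t)data;
 *
 *		if (rproc_vq_interrupt(priv->rproc, notifyid) == IRQ_NONE)
 *			dev_dbg(client->dev, "no message found in vq%u\n",
 *				notifyid);
 *	}
 */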

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

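/*
 * The vdev config space lives in the resource table immediately after the
 * fw_rsc_vdev vring entries, so &rsc->vring[rsc->num_of_vrings] is its start
 * address; rsc->config_len bounds every access in the accessors below.
 */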
static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features = rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs = rproc_virtio_find_vqs,
	.del_vqs = rproc_virtio_del_vqs,
	.reset = rproc_virtio_reset,
	.set_status = rproc_virtio_set_status,
	.get_status = rproc_virtio_get_status,
	.get = rproc_virtio_get,
	.set = rproc_virtio_set,
};

/*
 * This function is called whenever the vdev is released, and is responsible
 * for decrementing the refcount on the vdev's platform device, which was
 * taken when the vdev was registered.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	kfree(vdev);

	put_device(&rvdev->pdev->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->pdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use the dma address, as the carveout is not mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							  mem->da,
							  mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device = id;
	vdev->config = &rproc_virtio_config_ops;
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/* Reference the vdev and vring allocations */
	get_device(dev);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	struct device *dev = &rvdev->pdev->dev;
	int ret;

	ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}

static int rproc_virtio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc_vdev_data *rvdev_data = dev->platform_data;
	struct rproc_vdev *rvdev;
	struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
	struct fw_rsc_vdev *rsc;
	int i, ret;

	if (!rvdev_data)
		return -EINVAL;

	rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->id = rvdev_data->id;
	rvdev->rproc = rproc;
	rvdev->index = rvdev_data->index;

	ret = copy_dma_range_map(dev, rproc->dev.parent);
	if (ret)
		return ret;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	platform_set_drvdata(pdev, rvdev);
	rvdev->pdev = pdev;

	rsc = rvdev_data->rsc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			return ret;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = rvdev_data->rsc_offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	rproc_add_rvdev(rproc, rvdev);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because the platform device or the vdev device will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ on platform remove.
	 */
	get_device(&rproc->dev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);

	return ret;
}

static void rproc_virtio_remove(struct platform_device *pdev)
{
	struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
	struct rproc *rproc = rvdev->rproc;
	struct rproc_vring *rvring;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	rproc_remove_rvdev(rvdev);

	of_reserved_mem_device_release(&pdev->dev);
	dma_release_coherent_memory(&pdev->dev);

	put_device(&rproc->dev);
}

/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
	.probe = rproc_virtio_probe,
	.remove_new = rproc_virtio_remove,
	.driver = {
		.name = "rproc-virtio",
	},
};
builtin_platform_driver(rproc_virtio_driver);