Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Remote processor messaging transport (OMAP platform-specific bits)
  4 *
  5 * Copyright (C) 2011 Texas Instruments, Inc.
  6 * Copyright (C) 2011 Google, Inc.
  7 *
  8 * Ohad Ben-Cohen <ohad@wizery.com>
  9 * Brian Swetland <swetland@google.com>
 
 
 
 
 
 
 
 
 
 10 */
 11
 12#include <linux/dma-direct.h>
 13#include <linux/dma-map-ops.h>
 14#include <linux/dma-mapping.h>
 15#include <linux/export.h>
 16#include <linux/of_reserved_mem.h>
 17#include <linux/platform_device.h>
 18#include <linux/remoteproc.h>
 19#include <linux/virtio.h>
 20#include <linux/virtio_config.h>
 21#include <linux/virtio_ids.h>
 22#include <linux/virtio_ring.h>
 23#include <linux/err.h>
 24#include <linux/kref.h>
 25#include <linux/slab.h>
 26
 27#include "remoteproc_internal.h"
 28
 29static int copy_dma_range_map(struct device *to, struct device *from)
 30{
 31	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
 32	int num_ranges = 0;
 33
 34	if (!map)
 35		return 0;
 36
 37	for (r = map; r->size; r++)
 38		num_ranges++;
 39
 40	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
 41			  GFP_KERNEL);
 42	if (!new_map)
 43		return -ENOMEM;
 44	to->dma_range_map = new_map;
 45	return 0;
 46}
 47
 48static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
 49{
 50	struct platform_device *pdev;
 51
 52	pdev = container_of(vdev->dev.parent, struct platform_device, dev);
 53
 54	return platform_get_drvdata(pdev);
 55}
 56
 57static  struct rproc *vdev_to_rproc(struct virtio_device *vdev)
 58{
 59	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 60
 61	return rvdev->rproc;
 62}
 63
 64/* kick the remote processor, and let it know which virtqueue to poke at */
 65static bool rproc_virtio_notify(struct virtqueue *vq)
 66{
 67	struct rproc_vring *rvring = vq->priv;
 68	struct rproc *rproc = rvring->rvdev->rproc;
 69	int notifyid = rvring->notifyid;
 70
 71	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
 72
 73	rproc->ops->kick(rproc, notifyid);
 74	return true;
 75}
 76
 77/**
 78 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 79 * @rproc: handle to the remote processor
 80 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 81 *
 82 * This function should be called by the platform-specific rproc driver,
 83 * when the remote processor signals that a specific virtqueue has pending
 84 * messages available.
 85 *
 86 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 87 * and otherwise returns IRQ_HANDLED.
 88 */
 89irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
 90{
 91	struct rproc_vring *rvring;
 92
 93	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
 94
 95	rvring = idr_find(&rproc->notifyids, notifyid);
 96	if (!rvring || !rvring->vq)
 97		return IRQ_NONE;
 98
 99	return vring_interrupt(0, rvring->vq);
100}
101EXPORT_SYMBOL(rproc_vq_interrupt);
102
/*
 * Create virtqueue @id for @vdev on its preallocated "vdev%dvring%d"
 * carveout, wire it to the matching rproc_vring, and publish the vring's
 * device address back into the firmware resource table.
 */
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	/* A NULL name means the caller doesn't want this queue. */
	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	/* Link vq and rvring both ways; the notify/interrupt paths use this. */
	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}
165
/* Delete all of @vdev's virtqueues and detach them from their rvrings. */
static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	/* _safe variant: vring_del_virtqueue() removes vq from the list. */
	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		/* Clear the back-pointer so rproc_vq_interrupt() ignores it. */
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}
177
/* virtio_config_ops .del_vqs callback: tear down all of @vdev's queues. */
static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}
182
183static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
184				 struct virtqueue *vqs[],
185				 vq_callback_t *callbacks[],
186				 const char * const names[],
187				 const bool * ctx,
188				 struct irq_affinity *desc)
189{
190	int i, ret, queue_idx = 0;
191
192	for (i = 0; i < nvqs; ++i) {
193		if (!names[i]) {
194			vqs[i] = NULL;
195			continue;
196		}
197
198		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
199				    ctx ? ctx[i] : false);
200		if (IS_ERR(vqs[i])) {
201			ret = PTR_ERR(vqs[i]);
202			goto error;
203		}
204	}
205
206	return 0;
207
208error:
209	__rproc_virtio_del_vqs(vdev);
210	return ret;
211}
212
213static u8 rproc_virtio_get_status(struct virtio_device *vdev)
214{
215	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
216	struct fw_rsc_vdev *rsc;
217
218	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
219
220	return rsc->status;
221}
222
223static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
224{
225	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
226	struct fw_rsc_vdev *rsc;
227
228	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
229
230	rsc->status = status;
231	dev_dbg(&vdev->dev, "status: %d\n", status);
232}
233
234static void rproc_virtio_reset(struct virtio_device *vdev)
235{
236	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
237	struct fw_rsc_vdev *rsc;
238
239	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
240
241	rsc->status = 0;
242	dev_dbg(&vdev->dev, "reset !\n");
243}
244
245/* provide the vdev features as retrieved from the firmware */
246static u64 rproc_virtio_get_features(struct virtio_device *vdev)
247{
248	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
249	struct fw_rsc_vdev *rsc;
250
251	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
252
253	return rsc->dfeatures;
254}
255
/* Mask out feature bits the remoteproc transport cannot honor. */
static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
265
/*
 * virtio_config_ops .finalize_features: filter the negotiated feature
 * bits and record them in the resource table for the remote side.
 */
static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}
290
291static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
292			     void *buf, unsigned int len)
293{
294	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
295	struct fw_rsc_vdev *rsc;
296	void *cfg;
297
298	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
299	cfg = &rsc->vring[rsc->num_of_vrings];
300
301	if (offset + len > rsc->config_len || offset + len < len) {
302		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
303		return;
304	}
305
306	memcpy(buf, cfg + offset, len);
307}
308
309static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
310			     const void *buf, unsigned int len)
311{
312	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
313	struct fw_rsc_vdev *rsc;
314	void *cfg;
315
316	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
317	cfg = &rsc->vring[rsc->num_of_vrings];
318
319	if (offset + len > rsc->config_len || offset + len < len) {
320		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
321		return;
322	}
323
324	memcpy(cfg + offset, buf, len);
325}
326
/* virtio config ops, all served out of the firmware resource table */
static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};
338
/*
 * This function is called whenever vdev is released, and is responsible
 * to decrement the remote processor's refcount which was taken when vdev was
 * added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	kfree(vdev);

	/* Balance the get_device() taken in rproc_add_virtio_dev(). */
	put_device(&rvdev->pdev->dev);
}
356
/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->pdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	/* A kick handler is mandatory: rproc_virtio_notify() relies on it. */
	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use dma address as carveout no memmapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							   mem->da,
							   mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device	= id,
	vdev->config = &rproc_virtio_config_ops,
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/* Reference the vdev and vring allocations */
	get_device(dev);

	ret = register_virtio_device(vdev);
	if (ret) {
		/* vdev is freed by rproc_virtio_dev_release() via put_device() */
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}
455
/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	unregister_virtio_device(dev_to_virtio(dev));
	return 0;
}
472
473static int rproc_vdev_do_start(struct rproc_subdev *subdev)
474{
475	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
476
477	return rproc_add_virtio_dev(rvdev, rvdev->id);
478}
479
480static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
481{
482	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
483	struct device *dev = &rvdev->pdev->dev;
484	int ret;
485
486	ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
487	if (ret)
488		dev_warn(dev, "can't remove vdev child device: %d\n", ret);
489}
490
/*
 * Probe one "rproc-virtio" platform device: set up DMA inheritance from
 * the rproc parent, parse and allocate the vrings described by the
 * firmware resource table, and register the vdev as an rproc subdevice.
 */
static int rproc_virtio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc_vdev_data *rvdev_data = dev->platform_data;
	struct rproc_vdev *rvdev;
	struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
	struct fw_rsc_vdev *rsc;
	int i, ret;

	if (!rvdev_data)
		return -EINVAL;

	rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->id = rvdev_data->id;
	rvdev->rproc = rproc;
	rvdev->index = rvdev_data->index;

	ret = copy_dma_range_map(dev, rproc->dev.parent);
	if (ret)
		return ret;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

	/* DMA mask failure is non-fatal; warn and keep going. */
	ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	platform_set_drvdata(pdev, rvdev);
	rvdev->pdev = pdev;

	rsc = rvdev_data->rsc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			return ret;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = rvdev_data->rsc_offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	rproc_add_rvdev(rproc, rvdev);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because the platform device or the vdev device will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ on platform remove.
	 */
	get_device(&rproc->dev);

	return 0;

unwind_vring_allocations:
	/* Free only the vrings allocated before the failure. */
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);

	return ret;
}
571
572static void rproc_virtio_remove(struct platform_device *pdev)
573{
574	struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
575	struct rproc *rproc = rvdev->rproc;
576	struct rproc_vring *rvring;
577	int id;
578
579	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
580		rvring = &rvdev->vring[id];
581		rproc_free_vring(rvring);
582	}
583
584	rproc_remove_subdev(rproc, &rvdev->subdev);
585	rproc_remove_rvdev(rvdev);
586
587	of_reserved_mem_device_release(&pdev->dev);
588	dma_release_coherent_memory(&pdev->dev);
589
590	put_device(&rproc->dev);
591}
592
/* Platform driver bound to the "rproc-virtio" platform devices */
static struct platform_driver rproc_virtio_driver = {
	.probe		= rproc_virtio_probe,
	.remove_new	= rproc_virtio_remove,
	.driver		= {
		.name	= "rproc-virtio",
	},
};
builtin_platform_driver(rproc_virtio_driver);
v4.17
 
  1/*
  2 * Remote processor messaging transport (OMAP platform-specific bits)
  3 *
  4 * Copyright (C) 2011 Texas Instruments, Inc.
  5 * Copyright (C) 2011 Google, Inc.
  6 *
  7 * Ohad Ben-Cohen <ohad@wizery.com>
  8 * Brian Swetland <swetland@google.com>
  9 *
 10 * This software is licensed under the terms of the GNU General Public
 11 * License version 2, as published by the Free Software Foundation, and
 12 * may be copied, distributed, and modified under those terms.
 13 *
 14 * This program is distributed in the hope that it will be useful,
 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 17 * GNU General Public License for more details.
 18 */
 19
 
 
 
 20#include <linux/export.h>
 
 
 21#include <linux/remoteproc.h>
 22#include <linux/virtio.h>
 23#include <linux/virtio_config.h>
 24#include <linux/virtio_ids.h>
 25#include <linux/virtio_ring.h>
 26#include <linux/err.h>
 27#include <linux/kref.h>
 28#include <linux/slab.h>
 29
 30#include "remoteproc_internal.h"
 31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	/* Unknown notifyid, or the vring's vq has already been deleted. */
	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
 70
/*
 * Create virtqueue @id for @vdev on its preallocated vring memory and
 * wire it to the matching rproc_vring.
 */
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring;
	struct virtqueue *vq;
	void *addr;
	int len, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	/* A NULL name means the caller doesn't want this queue. */
	if (!name)
		return NULL;

	/* Vring memory was set up when the rvdev was created. */
	rvring = &rvdev->vring[id];
	addr = rvring->va;
	len = rvring->len;

	/* zero vring */
	size = vring_size(len, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
		id, addr, len, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	/* Link vq and rvring both ways for the notify/interrupt paths. */
	rvring->vq = vq;
	vq->priv = rvring;

	return vq;
}
119
/* Delete all of @vdev's virtqueues and detach them from their rvrings. */
static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	/* _safe variant: vring_del_virtqueue() removes vq from the list. */
	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		/* Clear the back-pointer so rproc_vq_interrupt() ignores it. */
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

/* virtio_config_ops .del_vqs callback */
static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}
136
/* virtio_config_ops .find_vqs callback: create @nvqs virtqueues. */
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	/* Roll back the queues created so far. */
	__rproc_virtio_del_vqs(vdev);
	return ret;
}
161
/* Read the virtio status byte from the shared resource table entry. */
static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

/* Write the virtio status byte into the shared resource table entry. */
static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

/* virtio reset: zero the status byte in the shared resource table. */
static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}
204
 
 
 
 
 
 
 
 
 
 
/*
 * virtio_config_ops .finalize_features: filter the negotiated feature
 * bits and record them in the resource table for the remote side.
 */
static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}
226
/* Read @len bytes of virtio config space at @offset into @buf. */
static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	/* Config space sits right after the vring descriptors. */
	cfg = &rsc->vring[rsc->num_of_vrings];

	/* Reject out-of-bounds access, including unsigned wrap-around. */
	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

/* Write @len bytes from @buf into virtio config space at @offset. */
static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	/* Config space sits right after the vring descriptors. */
	cfg = &rsc->vring[rsc->num_of_vrings];

	/* Reject out-of-bounds access, including unsigned wrap-around. */
	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}
262
/* virtio config ops, all served out of the firmware resource table */
static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};
274
/*
 * This function is called whenever vdev is released, and is responsible
 * to decrement the remote processor's refcount which was taken when vdev was
 * added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);

	/* Drop the vdev/vring reference taken in rproc_add_virtio_dev(). */
	kref_put(&rvdev->refcount, rproc_vdev_release);

	put_device(&rproc->dev);
}
293
/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Returns 0 on success or an appropriate error value otherwise.
 */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct virtio_device *vdev = &rvdev->vdev;
	int ret;

	vdev->id.device	= id,
	vdev->config = &rproc_virtio_config_ops,
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because drivers probed with this vdev will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ when the vdev is released.
	 */
	get_device(&rproc->dev);

	/* Reference the vdev and vring allocations */
	kref_get(&rvdev->refcount);

	ret = register_virtio_device(vdev);
	if (ret) {
		/* References above are dropped by rproc_virtio_dev_release() */
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}
340
/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @rvdev: the remote vdev
 *
 * This function unregisters an existing virtio device.
 */
void rproc_remove_virtio_dev(struct rproc_vdev *rvdev)
{
	unregister_virtio_device(&rvdev->vdev);
}