Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * TI K3 Cortex-M4 Remote Processor(s) driver
  4 *
  5 * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
  6 *	Hari Nagalla <hnagalla@ti.com>
  7 */
  8
  9#include <linux/io.h>
 10#include <linux/mailbox_client.h>
 11#include <linux/module.h>
 12#include <linux/of_address.h>
 13#include <linux/of_reserved_mem.h>
 14#include <linux/platform_device.h>
 15#include <linux/remoteproc.h>
 16#include <linux/reset.h>
 17#include <linux/slab.h>
 18
 19#include "omap_remoteproc.h"
 20#include "remoteproc_internal.h"
 21#include "ti_sci_proc.h"
 22
 23#define K3_M4_IRAM_DEV_ADDR 0x00000
 24#define K3_M4_DRAM_DEV_ADDR 0x30000
 25
/**
 * struct k3_m4_rproc_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region (devm_ioremap_wc mapping)
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from remote processor view
 * @size: Size of the memory region
 */
struct k3_m4_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
 39
/**
 * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
 * @name: name for this memory entry
 * @dev_addr: device address for the memory entry
 *
 * NOTE(review): not referenced anywhere in this file (the mem_names/mem_addrs
 * arrays in k3_m4_rproc_of_get_memories() are used instead) — confirm whether
 * this type is still needed.
 */
struct k3_m4_rproc_mem_data {
	const char *name;
	const u32 dev_addr;
};
 49
/**
 * struct k3_m4_rproc - k3 remote processor driver structure
 * @dev: cached device pointer
 * @mem: internal memory regions data (IRAM/DRAM)
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data (DDR carveouts, excluding the DMA pool)
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle (local CPU reset)
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_m4_rproc {
	struct device *dev;
	struct k3_m4_rproc_mem *mem;
	int num_mems;
	struct k3_m4_rproc_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};
 77
/**
 * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the K3 mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct device *dev = client->dev;
	struct rproc *rproc = dev_get_drvdata(dev);
	/* payload arrives as a pointer value; the low 32 bits carry the message */
	u32 msg = (u32)(uintptr_t)(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
		break;
	case RP_MBOX_ECHO_REPLY:
		/* reply to the echo request sent by k3_m4_rproc_ping_mbox() */
		dev_info(dev, "received echo reply from %s\n", rproc->name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		/* values above max_notifyid cannot be valid vring indices */
		if (msg > rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}
124
125/*
126 * Kick the remote processor to notify about pending unprocessed messages.
127 * The vqid usage is not used and is inconsequential, as the kick is performed
128 * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
129 * the remote processor is expected to process both its Tx and Rx virtqueues.
130 */
131static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
132{
133	struct k3_m4_rproc *kproc = rproc->priv;
134	struct device *dev = kproc->dev;
135	u32 msg = (u32)vqid;
136	int ret;
137
138	/*
139	 * Send the index of the triggered virtqueue in the mailbox payload.
140	 * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
141	 * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
142	 */
143	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
144	if (ret < 0)
145		dev_err(dev, "failed to send mailbox message, status = %d\n",
146			ret);
147}
148
149static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
150{
151	struct device *dev = kproc->dev;
152	int ret;
153
154	/*
155	 * Ping the remote processor, this is only for sanity-sake for now;
156	 * there is no functional effect whatsoever.
157	 *
158	 * Note that the reply will _not_ arrive immediately: this message
159	 * will wait in the mailbox fifo until the remote processor is booted.
160	 */
161	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
162	if (ret < 0) {
163		dev_err(dev, "mbox_send_message failed: %d\n", ret);
164		return ret;
165	}
166
167	return 0;
168}
169
170/*
171 * The M4 cores have a local reset that affects only the CPU, and a
172 * generic module reset that powers on the device and allows the internal
173 * memories to be accessed while the local reset is asserted. This function is
174 * used to release the global reset on remote cores to allow loading into the
175 * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
176 * firmware loading, and is followed by the .start() ops after loading to
177 * actually let the remote cores to run.
178 */
179static int k3_m4_rproc_prepare(struct rproc *rproc)
180{
181	struct k3_m4_rproc *kproc = rproc->priv;
182	struct device *dev = kproc->dev;
183	int ret;
184
185	/* If the core is running already no need to deassert the module reset */
186	if (rproc->state == RPROC_DETACHED)
187		return 0;
188
189	/*
190	 * Ensure the local reset is asserted so the core doesn't
191	 * execute bogus code when the module reset is released.
192	 */
193	ret = reset_control_assert(kproc->reset);
194	if (ret) {
195		dev_err(dev, "could not assert local reset\n");
196		return ret;
197	}
198
199	ret = reset_control_status(kproc->reset);
200	if (ret <= 0) {
201		dev_err(dev, "local reset still not asserted\n");
202		return ret;
203	}
204
205	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
206						    kproc->ti_sci_id);
207	if (ret) {
208		dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
209		return ret;
210	}
211
212	return 0;
213}
214
215/*
216 * This function implements the .unprepare() ops and performs the complimentary
217 * operations to that of the .prepare() ops. The function is used to assert the
218 * global reset on applicable cores. This completes the second portion of
219 * powering down the remote core. The cores themselves are only halted in the
220 * .stop() callback through the local reset, and the .unprepare() ops is invoked
221 * by the remoteproc core after the remoteproc is stopped to balance the global
222 * reset.
223 */
224static int k3_m4_rproc_unprepare(struct rproc *rproc)
225{
226	struct k3_m4_rproc *kproc = rproc->priv;
227	struct device *dev = kproc->dev;
228	int ret;
229
230	/* If the core is going to be detached do not assert the module reset */
231	if (rproc->state == RPROC_ATTACHED)
232		return 0;
233
234	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
235						    kproc->ti_sci_id);
236	if (ret) {
237		dev_err(dev, "module-reset assert failed\n");
238		return ret;
239	}
240
241	return 0;
242}
243
244/*
245 * This function implements the .get_loaded_rsc_table() callback and is used
246 * to provide the resource table for a booted remote processor in IPC-only
247 * mode. The remote processor firmwares follow a design-by-contract approach
248 * and are expected to have the resource table at the base of the DDR region
249 * reserved for firmware usage. This provides flexibility for the remote
250 * processor to be booted by different bootloaders that may or may not have the
251 * ability to publish the resource table address and size through a DT
252 * property.
253 */
254static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
255							 size_t *rsc_table_sz)
256{
257	struct k3_m4_rproc *kproc = rproc->priv;
258	struct device *dev = kproc->dev;
259
260	if (!kproc->rmem[0].cpu_addr) {
261		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
262		return ERR_PTR(-ENOMEM);
263	}
264
265	/*
266	 * NOTE: The resource table size is currently hard-coded to a maximum
267	 * of 256 bytes. The most common resource table usage for K3 firmwares
268	 * is to only have the vdev resource entry and an optional trace entry.
269	 * The exact size could be computed based on resource table address, but
270	 * the hard-coded value suffices to support the IPC-only mode.
271	 */
272	*rsc_table_sz = 256;
273	return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
274}
275
/*
 * Custom function to translate a remote processor device address (internal
 * RAMs only) to a kernel virtual address.  The remote processors can access
 * their RAMs at either an internal address visible only from a remote
 * processor, or at the SoC-level bus address. Both these addresses need to be
 * looked through for translation. The translated addresses can be used either
 * by the remoteproc core for loading (when using kernel remoteproc loader), or
 * by any rpmsg bus drivers.
 *
 * Returns the translated CPU address, or NULL when [da, da + len) does not
 * fall entirely inside any known region (or len is 0).
 *
 * NOTE(review): is_iomem is never set here, so callers see the default —
 * confirm whether iomem-tagged access is needed for the internal RAMs.
 * NOTE(review): da + len is not checked for arithmetic wrap-around — confirm
 * callers only pass validated firmware segment bounds.
 */
static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	/* internal RAMs: try the M4-internal view first, then the SoC view */
	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* handle M4-view addresses (IRAM is at device address 0x0) */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses */
		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* no region covers the requested range */
	return NULL;
}
331
/*
 * Request and map the M4 internal RAM regions ("iram" and "dram" MMIO
 * resources) and record their bus/device addresses and sizes for use by
 * the da_to_va translation. All resources are devm-managed, so no explicit
 * cleanup is required on error or removal.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
				       struct k3_m4_rproc *kproc)
{
	static const char * const mem_names[] = { "iram", "dram" };
	static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems;
	int i;

	num_mems = ARRAY_SIZE(mem_names);
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mem_names[i]);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
			return -EBUSY;
		}

		/* write-combined mapping for efficient firmware loading */
		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				mem_names[i]);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		/* device address is the fixed M4-internal view of this RAM */
		kproc->mem[i].dev_addr = mem_addrs[i];
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			mem_names[i], &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}
384
/* devm action undoing of_reserved_mem_device_init_by_idx() done at probe */
static void k3_m4_rproc_dev_mem_release(void *data)
{
	of_reserved_mem_device_release(data);
}
391
392static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
393{
394	struct device *dev = kproc->dev;
395	struct device_node *np = dev->of_node;
396	struct device_node *rmem_np;
397	struct reserved_mem *rmem;
398	int num_rmems;
399	int ret, i;
400
401	num_rmems = of_property_count_elems_of_size(np, "memory-region",
402						    sizeof(phandle));
403	if (num_rmems < 0) {
404		dev_err(dev, "device does not reserved memory regions (%d)\n",
405			num_rmems);
406		return -EINVAL;
407	}
408	if (num_rmems < 2) {
409		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
410			num_rmems);
411		return -EINVAL;
412	}
413
414	/* use reserved memory region 0 for vring DMA allocations */
415	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
416	if (ret) {
417		dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
418		return ret;
419	}
420	ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
421	if (ret)
422		return ret;
423
424	num_rmems--;
425	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
426	if (!kproc->rmem)
427		return -ENOMEM;
428
429	/* use remaining reserved memory regions for static carveouts */
430	for (i = 0; i < num_rmems; i++) {
431		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
432		if (!rmem_np)
433			return -EINVAL;
434
435		rmem = of_reserved_mem_lookup(rmem_np);
436		of_node_put(rmem_np);
437		if (!rmem)
438			return -EINVAL;
439
440		kproc->rmem[i].bus_addr = rmem->base;
441		/* 64-bit address regions currently not supported */
442		kproc->rmem[i].dev_addr = (u32)rmem->base;
443		kproc->rmem[i].size = rmem->size;
444		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
445		if (!kproc->rmem[i].cpu_addr) {
446			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
447				i + 1, &rmem->base, &rmem->size);
448			return -ENOMEM;
449		}
450
451		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
452			i + 1, &kproc->rmem[i].bus_addr,
453			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
454			kproc->rmem[i].dev_addr);
455	}
456	kproc->num_rmems = num_rmems;
457
458	return 0;
459}
460
/* devm action releasing the TI-SCI processor control handle */
static void k3_m4_release_tsp(void *data)
{
	ti_sci_proc_release((struct ti_sci_proc *)data);
}
467
468/*
469 * Power up the M4 remote processor.
470 *
471 * This function will be invoked only after the firmware for this rproc
472 * was loaded, parsed successfully, and all of its resource requirements
473 * were met. This callback is invoked only in remoteproc mode.
474 */
475static int k3_m4_rproc_start(struct rproc *rproc)
476{
477	struct k3_m4_rproc *kproc = rproc->priv;
478	struct device *dev = kproc->dev;
479	int ret;
480
481	ret = k3_m4_rproc_ping_mbox(kproc);
482	if (ret)
483		return ret;
484
485	ret = reset_control_deassert(kproc->reset);
486	if (ret) {
487		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
488		return ret;
489	}
490
491	return 0;
492}
493
494/*
495 * Stop the M4 remote processor.
496 *
497 * This function puts the M4 processor into reset, and finishes processing
498 * of any pending messages. This callback is invoked only in remoteproc mode.
499 */
500static int k3_m4_rproc_stop(struct rproc *rproc)
501{
502	struct k3_m4_rproc *kproc = rproc->priv;
503	struct device *dev = kproc->dev;
504	int ret;
505
506	ret = reset_control_assert(kproc->reset);
507	if (ret) {
508		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
509		return ret;
510	}
511
512	return 0;
513}
514
515/*
516 * Attach to a running M4 remote processor (IPC-only mode)
517 *
518 * The remote processor is already booted, so there is no need to issue any
519 * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
520 * mode.
521 */
522static int k3_m4_rproc_attach(struct rproc *rproc)
523{
524	struct k3_m4_rproc *kproc = rproc->priv;
525	int ret;
526
527	ret = k3_m4_rproc_ping_mbox(kproc);
528	if (ret)
529		return ret;
530
531	return 0;
532}
533
/*
 * .detach ops: the inverse of .attach (IPC-only mode).
 *
 * The M4 core is deliberately left running its booted firmware; there is
 * nothing to tear down here.
 */
static int k3_m4_rproc_detach(struct rproc *rproc)
{
	return 0;
}
545
/*
 * remoteproc ops: .prepare/.unprepare and .start/.stop apply in remoteproc
 * mode, .attach/.detach apply in IPC-only mode; the remaining callbacks are
 * common to both modes.
 */
static const struct rproc_ops k3_m4_rproc_ops = {
	.prepare = k3_m4_rproc_prepare,
	.unprepare = k3_m4_rproc_unprepare,
	.start = k3_m4_rproc_start,
	.stop = k3_m4_rproc_stop,
	.attach = k3_m4_rproc_attach,
	.detach = k3_m4_rproc_detach,
	.kick = k3_m4_rproc_kick,
	.da_to_va = k3_m4_rproc_da_to_va,
	.get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
};
557
/*
 * Probe: allocate the rproc, acquire TI-SCI/reset/memory resources (all
 * devm-managed), determine remoteproc vs IPC-only mode from the device's
 * power state, request the mailbox channel, and register with the
 * remoteproc core.
 */
static int k3_m4_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct k3_m4_rproc *kproc;
	struct rproc *rproc;
	const char *fw_name;
	bool r_state = false;
	bool p_state = false;
	int ret;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret)
		return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");

	rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
				 sizeof(*kproc));
	if (!rproc)
		return -ENOMEM;

	/* exceptions are only logged in the mbox callback, not recovered */
	rproc->has_iommu = false;
	rproc->recovery_disabled = true;
	kproc = rproc->priv;
	kproc->dev = dev;
	platform_set_drvdata(pdev, rproc);

	kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(kproc->ti_sci))
		return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
				     "failed to get ti-sci handle\n");

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
	if (ret)
		return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");

	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(kproc->reset))
		return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");

	kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
	if (IS_ERR(kproc->tsp))
		return dev_err_probe(dev, PTR_ERR(kproc->tsp),
				     "failed to construct ti-sci proc control\n");

	ret = ti_sci_proc_request(kproc->tsp);
	if (ret < 0)
		return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
	/* balance ti_sci_proc_request() on error and on removal */
	ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
	if (ret)
		return ret;

	ret = k3_m4_rproc_of_get_memories(pdev, kproc);
	if (ret)
		return ret;

	ret = k3_m4_reserved_mem_init(kproc);
	if (ret)
		return dev_err_probe(dev, ret, "reserved memory init failed\n");

	ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
					       &r_state, &p_state);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to get initial state, mode cannot be determined\n");

	/* configure devices for either remoteproc or IPC-only mode */
	if (p_state) {
		/* core already powered on: it was booted externally */
		rproc->state = RPROC_DETACHED;
		dev_info(dev, "configured M4F for IPC-only mode\n");
	} else {
		dev_info(dev, "configured M4F for remoteproc mode\n");
	}

	/* set up the mailbox client before registering with the core */
	kproc->client.dev = dev;
	kproc->client.tx_done = NULL;
	kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
	kproc->client.tx_block = false;
	kproc->client.knows_txdone = false;
	kproc->mbox = mbox_request_channel(&kproc->client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	ret = devm_rproc_add(dev, rproc);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to register device with remoteproc core\n");

	return 0;
}
647
/* devices handled by this driver: AM64x M4F subsystem */
static const struct of_device_id k3_m4_of_match[] = {
	{ .compatible = "ti,am64-m4fss", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);

static struct platform_driver k3_m4_rproc_driver = {
	.probe	= k3_m4_rproc_probe,
	.driver	= {
		.name = "k3-m4-rproc",
		.of_match_table = k3_m4_of_match,
	},
};
module_platform_driver(k3_m4_rproc_driver);

MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
MODULE_LICENSE("GPL");