Linux Audio

Check our new training course

Embedded Linux training

Mar 31-Apr 8, 2025
Register
Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ZynqMP R5 Remote Processor driver
   4 *
   5 */
   6
   7#include <dt-bindings/power/xlnx-zynqmp-power.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/firmware/xlnx-zynqmp.h>
  10#include <linux/kernel.h>
  11#include <linux/mailbox_client.h>
  12#include <linux/mailbox/zynqmp-ipi-message.h>
  13#include <linux/module.h>
  14#include <linux/of_address.h>
  15#include <linux/of_platform.h>
  16#include <linux/of_reserved_mem.h>
  17#include <linux/platform_device.h>
  18#include <linux/remoteproc.h>
 
  19
  20#include "remoteproc_internal.h"
  21
  22/* IPI buffer MAX length */
  23#define IPI_BUF_LEN_MAX	32U
  24
  25/* RX mailbox client buffer max length */
  26#define MBOX_CLIENT_BUF_MAX	(IPI_BUF_LEN_MAX + \
  27				 sizeof(struct zynqmp_ipi_message))
/*
 * Settings for the RPU cluster mode, reflecting the possible values of the
 * "xlnx,cluster-mode" device-tree property.
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* When cores run as separate processor */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep,clk-for-clk */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};
  37
/**
 * struct mem_bank_data - Memory Bank description
 *
 * @addr: Start address of memory bank (physical, CPU view)
 * @da: device address (the R5 core's view of the same bank)
 * @size: Size of Memory bank in bytes
 * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
 * @bank_name: name of the bank for remoteproc framework
 */
struct mem_bank_data {
	phys_addr_t addr;
	u32 da;
	size_t size;
	u32 pm_domain_id;
	char *bank_name;
};
  54
/**
 * struct mbox_info - per-core IPI mailbox state
 *
 * @rx_mc_buf: to copy data from mailbox rx channel
 * @tx_mc_buf: to copy data to mailbox tx channel
 * @r5_core: this mailbox's corresponding r5_core pointer
 * @mbox_work: schedule work after receiving data from mailbox
 * @mbox_cl: mailbox client
 * @tx_chan: mailbox tx channel
 * @rx_chan: mailbox rx channel
 */
struct mbox_info {
	unsigned char rx_mc_buf[MBOX_CLIENT_BUF_MAX];
	unsigned char tx_mc_buf[MBOX_CLIENT_BUF_MAX];
	struct zynqmp_r5_core *r5_core;
	struct work_struct mbox_work;
	struct mbox_client mbox_cl;
	struct mbox_chan *tx_chan;
	struct mbox_chan *rx_chan;
};
  75
/*
 * Hardcoded TCM bank values. This will be removed once TCM bindings are
 * accepted for system-dt specifications and upstreamed in linux kernel
 */
static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};

/* In lockstep mode cluster combines each 64KB TCM and makes 128KB TCM */
static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
	{0xffe00000UL, 0x0, 0x20000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 128KB each */
	{0xffe20000UL, 0x20000, 0x20000UL, PD_R5_0_BTCM, "btcm0"},
	/* core1 banks are folded into core0's; keep zero-size entries so the
	 * power domains can still be requested/released uniformly */
	{0, 0, 0, PD_R5_1_ATCM, ""},
	{0, 0, 0, PD_R5_1_BTCM, ""},
};
  94
/**
 * struct zynqmp_r5_core - per-core private data of one R5 core
 *
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rproc: rproc handle
 * @pm_domain_id: RPU CPU power domain id
 * @ipi: pointer to mailbox information (NULL when mailboxes are disabled)
 */
struct zynqmp_r5_core {
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	struct rproc *rproc;
	u32 pm_domain_id;
	struct mbox_info *ipi;
};
 115
/**
 * struct zynqmp_r5_cluster - driver data for the whole R5F cluster
 *
 * @dev: r5f subsystem cluster device node
 * @mode: cluster mode of type zynqmp_r5_cluster_mode
 * @core_count: number of r5 cores used for this cluster mode
 * @r5_cores: Array of pointers pointing to r5 core
 */
struct zynqmp_r5_cluster {
	struct device *dev;
	enum  zynqmp_r5_cluster_mode mode;
	int core_count;
	struct zynqmp_r5_core **r5_cores;
};
 130
 131/**
 132 * event_notified_idr_cb() - callback for vq_interrupt per notifyid
 133 * @id: rproc->notify id
 134 * @ptr: pointer to idr private data
 135 * @data: data passed to idr_for_each callback
 136 *
 137 * Pass notification to remoteproc virtio
 138 *
 139 * Return: 0. having return is to satisfy the idr_for_each() function
 140 *          pointer input argument requirement.
 141 **/
 142static int event_notified_idr_cb(int id, void *ptr, void *data)
 143{
 144	struct rproc *rproc = data;
 145
 146	if (rproc_vq_interrupt(rproc, id) == IRQ_NONE)
 147		dev_dbg(&rproc->dev, "data not found for vqid=%d\n", id);
 148
 149	return 0;
 150}
 151
 152/**
 153 * handle_event_notified() - remoteproc notification work function
 154 * @work: pointer to the work structure
 155 *
 156 * It checks each registered remoteproc notify IDs.
 157 */
 158static void handle_event_notified(struct work_struct *work)
 159{
 160	struct mbox_info *ipi;
 161	struct rproc *rproc;
 162
 163	ipi = container_of(work, struct mbox_info, mbox_work);
 164	rproc = ipi->r5_core->rproc;
 165
 166	/*
 167	 * We only use IPI for interrupt. The RPU firmware side may or may
 168	 * not write the notifyid when it trigger IPI.
 169	 * And thus, we scan through all the registered notifyids and
 170	 * find which one is valid to get the message.
 171	 * Even if message from firmware is NULL, we attempt to get vqid
 172	 */
 173	idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
 174}
 175
 176/**
 177 * zynqmp_r5_mb_rx_cb() - receive channel mailbox callback
 178 * @cl: mailbox client
 179 * @msg: message pointer
 180 *
 181 * Receive data from ipi buffer, ack interrupt and then
 182 * it will schedule the R5 notification work.
 183 */
 184static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
 185{
 186	struct zynqmp_ipi_message *ipi_msg, *buf_msg;
 187	struct mbox_info *ipi;
 188	size_t len;
 189
 190	ipi = container_of(cl, struct mbox_info, mbox_cl);
 191
 192	/* copy data from ipi buffer to r5_core */
 193	ipi_msg = (struct zynqmp_ipi_message *)msg;
 194	buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf;
 195	len = ipi_msg->len;
 196	if (len > IPI_BUF_LEN_MAX) {
 197		dev_warn(cl->dev, "msg size exceeded than %d\n",
 198			 IPI_BUF_LEN_MAX);
 199		len = IPI_BUF_LEN_MAX;
 200	}
 201	buf_msg->len = len;
 202	memcpy(buf_msg->data, ipi_msg->data, len);
 203
 204	/* received and processed interrupt ack */
 205	if (mbox_send_message(ipi->rx_chan, NULL) < 0)
 206		dev_err(cl->dev, "ack failed to mbox rx_chan\n");
 207
 208	schedule_work(&ipi->mbox_work);
 209}
 210
 211/**
 212 * zynqmp_r5_setup_mbox() - Setup mailboxes related properties
 213 *			    this is used for each individual R5 core
 214 *
 215 * @cdev: child node device
 216 *
 217 * Function to setup mailboxes related properties
 218 * return : NULL if failed else pointer to mbox_info
 219 */
 220static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev)
 221{
 222	struct mbox_client *mbox_cl;
 223	struct mbox_info *ipi;
 224
 225	ipi = kzalloc(sizeof(*ipi), GFP_KERNEL);
 226	if (!ipi)
 227		return NULL;
 228
 229	mbox_cl = &ipi->mbox_cl;
 230	mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb;
 231	mbox_cl->tx_block = false;
 232	mbox_cl->knows_txdone = false;
 233	mbox_cl->tx_done = NULL;
 234	mbox_cl->dev = cdev;
 235
 236	/* Request TX and RX channels */
 237	ipi->tx_chan = mbox_request_channel_byname(mbox_cl, "tx");
 238	if (IS_ERR(ipi->tx_chan)) {
 239		ipi->tx_chan = NULL;
 240		kfree(ipi);
 241		dev_warn(cdev, "mbox tx channel request failed\n");
 242		return NULL;
 243	}
 244
 245	ipi->rx_chan = mbox_request_channel_byname(mbox_cl, "rx");
 246	if (IS_ERR(ipi->rx_chan)) {
 247		mbox_free_channel(ipi->tx_chan);
 248		ipi->rx_chan = NULL;
 249		ipi->tx_chan = NULL;
 250		kfree(ipi);
 251		dev_warn(cdev, "mbox rx channel request failed\n");
 252		return NULL;
 253	}
 254
 255	INIT_WORK(&ipi->mbox_work, handle_event_notified);
 256
 257	return ipi;
 258}
 259
 260static void zynqmp_r5_free_mbox(struct mbox_info *ipi)
 261{
 262	if (!ipi)
 263		return;
 264
 265	if (ipi->tx_chan) {
 266		mbox_free_channel(ipi->tx_chan);
 267		ipi->tx_chan = NULL;
 268	}
 269
 270	if (ipi->rx_chan) {
 271		mbox_free_channel(ipi->rx_chan);
 272		ipi->rx_chan = NULL;
 273	}
 274
 275	kfree(ipi);
 276}
 277
/*
 * zynqmp_r5_rproc_kick() - kick a firmware if mbox is provided
 * @rproc: r5 core's corresponding rproc structure
 * @vqid: virtqueue ID
 *
 * Sends the virtqueue ID to the remote core over the IPI tx channel.
 * Silently does nothing when no mailbox was set up for this core.
 */
static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	struct device *dev = r5_core->dev;
	struct zynqmp_ipi_message *mb_msg;
	struct mbox_info *ipi;
	int ret;

	ipi = r5_core->ipi;
	if (!ipi)
		return;

	/* pack vqid as the message payload in the per-core tx buffer */
	mb_msg = (struct zynqmp_ipi_message *)ipi->tx_mc_buf;
	memcpy(mb_msg->data, &vqid, sizeof(vqid));
	mb_msg->len = sizeof(vqid);
	ret = mbox_send_message(ipi->tx_chan, mb_msg);
	if (ret < 0)
		dev_warn(dev, "failed to send message\n");
}
 302
 303/*
 304 * zynqmp_r5_set_mode()
 305 *
 306 * set RPU cluster and TCM operation mode
 307 *
 308 * @r5_core: pointer to zynqmp_r5_core type object
 309 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 310 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 311 *
 312 * Return: 0 for success and < 0 for failure
 313 */
 314static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
 315			      enum rpu_oper_mode fw_reg_val,
 316			      enum rpu_tcm_comb tcm_mode)
 317{
 318	int ret;
 319
 320	ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
 321	if (ret < 0) {
 322		dev_err(r5_core->dev, "failed to set RPU mode\n");
 323		return ret;
 324	}
 325
 326	ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
 327	if (ret < 0)
 328		dev_err(r5_core->dev, "failed to configure TCM\n");
 329
 330	return ret;
 331}
 332
/*
 * zynqmp_r5_rproc_start()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Start R5 Core from designated boot address.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_rproc_start(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	enum rpu_boot_mem bootmem;
	int ret;

	/*
	 * The exception vector pointers (EVP) refer to the base-address of
	 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
	 * starts at the base-address and subsequent vectors are on 4-byte
	 * boundaries.
	 *
	 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
	 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory)
	 *
	 * Usually firmware will put Exception vectors at LOVEC.
	 *
	 * It is not recommend that you change the exception vector.
	 * Changing the EVP to HIVEC will result in increased interrupt latency
	 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor
	 * is non-secured, then the Cortex-R5F processor cannot access the
	 * HIVEC exception vectors in the OCM.
	 */
	/* boot addresses at or above 0xFFFC0000 are treated as HIVEC/OCM */
	bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
		   PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;

	dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
		bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");

	/* wake the core through the platform firmware at the chosen vector */
	ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
				     bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
	if (ret)
		dev_err(r5_core->dev,
			"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);
	return ret;
}
 377
 378/*
 379 * zynqmp_r5_rproc_stop()
 380 * @rproc: single R5 core's corresponding rproc instance
 381 *
 382 * Power down  R5 Core.
 383 *
 384 * return 0 on success, otherwise non-zero value on failure
 385 */
 386static int zynqmp_r5_rproc_stop(struct rproc *rproc)
 387{
 388	struct zynqmp_r5_core *r5_core = rproc->priv;
 389	int ret;
 390
 391	ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
 392				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
 393	if (ret)
 394		dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
 395
 396	return ret;
 397}
 398
 399/*
 400 * zynqmp_r5_mem_region_map()
 401 * @rproc: single R5 core's corresponding rproc instance
 402 * @mem: mem descriptor to map reserved memory-regions
 403 *
 404 * Callback to map va for memory-region's carveout.
 405 *
 406 * return 0 on success, otherwise non-zero value on failure
 407 */
 408static int zynqmp_r5_mem_region_map(struct rproc *rproc,
 409				    struct rproc_mem_entry *mem)
 410{
 411	void __iomem *va;
 412
 413	va = ioremap_wc(mem->dma, mem->len);
 414	if (IS_ERR_OR_NULL(va))
 415		return -ENOMEM;
 416
 417	mem->va = (void *)va;
 418
 419	return 0;
 420}
 421
 422/*
 423 * zynqmp_r5_rproc_mem_unmap
 424 * @rproc: single R5 core's corresponding rproc instance
 425 * @mem: mem entry to unmap
 426 *
 427 * Unmap memory-region carveout
 428 *
 429 * return: always returns 0
 430 */
 431static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
 432				      struct rproc_mem_entry *mem)
 433{
 434	iounmap((void __iomem *)mem->va);
 435	return 0;
 436}
 437
/*
 * add_mem_regions_carveout()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Construct rproc mem carveouts from memory-region property nodes
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_mem_regions_carveout(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	struct of_phandle_iterator it;
	struct reserved_mem *rmem;
	int i = 0;

	r5_core = rproc->priv;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);

	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			/* the iterator holds a reference on it.node; drop it
			 * explicitly on every early exit from the loop */
			of_node_put(it.node);
			dev_err(&rproc->dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		if (!strcmp(it.node->name, "vdev0buffer")) {
			/* Init reserved memory for vdev buffer */
			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
								 rmem->size,
								 rmem->base,
								 it.node->name);
		} else {
			/* Register associated reserved memory regions */
			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
							 (dma_addr_t)rmem->base,
							 rmem->size, rmem->base,
							 zynqmp_r5_mem_region_map,
							 zynqmp_r5_mem_region_unmap,
							 it.node->name);
		}

		if (!rproc_mem) {
			of_node_put(it.node);
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, rproc_mem);

		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
			it.node->name, rmem->base, rmem->size);
		i++;
	}

	return 0;
}
 497
 498/*
 499 * tcm_mem_unmap()
 500 * @rproc: single R5 core's corresponding rproc instance
 501 * @mem: tcm mem entry to unmap
 502 *
 503 * Unmap TCM banks when powering down R5 core.
 504 *
 505 * return always 0
 506 */
 507static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
 508{
 509	iounmap((void __iomem *)mem->va);
 510
 511	return 0;
 512}
 513
 514/*
 515 * tcm_mem_map()
 516 * @rproc: single R5 core's corresponding rproc instance
 517 * @mem: tcm memory entry descriptor
 518 *
 519 * Given TCM bank entry, this func setup virtual address for TCM bank
 520 * remoteproc carveout. It also takes care of va to da address translation
 521 *
 522 * return 0 on success, otherwise non-zero value on failure
 523 */
 524static int tcm_mem_map(struct rproc *rproc,
 525		       struct rproc_mem_entry *mem)
 526{
 527	void __iomem *va;
 528
 529	va = ioremap_wc(mem->dma, mem->len);
 530	if (IS_ERR_OR_NULL(va))
 531		return -ENOMEM;
 532
 533	/* Update memory entry va */
 534	mem->va = (void *)va;
 535
 536	/* clear TCMs */
 537	memset_io(va, 0, mem->len);
 538
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 539	return 0;
 540}
 541
/*
 * add_tcm_carveout_split_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in split mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_split_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	struct device *dev;
	u32 pm_domain_id;
	size_t bank_size;
	char *bank_name;
	u32 da;

	r5_core = rproc->priv;
	dev = r5_core->dev;
	num_banks = r5_core->tcm_bank_count;

	/*
	 * Power-on Each 64KB TCM,
	 * register its address space, map and unmap functions
	 * and add carveouts accordingly
	 */
	for (i = 0; i < num_banks; i++) {
		bank_addr = r5_core->tcm_banks[i]->addr;
		da = r5_core->tcm_banks[i]->da;
		bank_name = r5_core->tcm_banks[i]->bank_name;
		bank_size = r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		/* power on this bank before touching it */
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_split;
		}

		dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
			bank_name, bank_addr, da, bank_size);

		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, da,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			/* release the bank just powered on; the unwind loop
			 * below only covers banks 0..i-1 */
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm_split;
		}

		rproc_add_carveout(rproc, rproc_mem);
	}

	return 0;

release_tcm_split:
	/* If failed, Turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
 612
/*
 * add_tcm_carveout_lockstep_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in lockstep mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	size_t bank_size = 0;
	struct device *dev;
	u32 pm_domain_id;
	char *bank_name;
	u32 da;

	r5_core = rproc->priv;
	dev = r5_core->dev;

	/* Go through zynqmp banks for r5 node */
	num_banks = r5_core->tcm_bank_count;

	/*
	 * In lockstep mode, TCM is contiguous memory block
	 * However, each TCM block still needs to be enabled individually.
	 * So, Enable each TCM block individually.
	 * Although ATCM and BTCM is contiguous memory block, add two separate
	 * carveouts for both.
	 */
	for (i = 0; i < num_banks; i++) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		/* Turn on each TCM bank individually */
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_lockstep;
		}

		/* zero-size entries (core1 banks folded into core0 in the
		 * lockstep table) are powered on but get no carveout */
		bank_size = r5_core->tcm_banks[i]->size;
		if (bank_size == 0)
			continue;

		bank_addr = r5_core->tcm_banks[i]->addr;
		da = r5_core->tcm_banks[i]->da;
		bank_name = r5_core->tcm_banks[i]->bank_name;

		/* Register TCM address range, TCM map and unmap functions */
		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, da,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			/* release the current bank; the unwind loop below
			 * only covers banks 0..i-1 */
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm_lockstep;
		}

		/* If registration is success, add carveouts */
		rproc_add_carveout(rproc, rproc_mem);

		dev_dbg(dev, "TCM carveout lockstep mode %s addr=0x%llx, da=0x%x, size=0x%lx",
			bank_name, bank_addr, da, bank_size);
	}

	return 0;

release_tcm_lockstep:
	/* If failed, Turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
 694
 695/*
 696 * add_tcm_banks()
 697 * @rproc: single R5 core's corresponding rproc instance
 698 *
 699 * allocate and add remoteproc carveouts for TCM memory based on cluster mode
 700 *
 701 * return 0 on success, otherwise non-zero value on failure
 702 */
 703static int add_tcm_banks(struct rproc *rproc)
 704{
 705	struct zynqmp_r5_cluster *cluster;
 706	struct zynqmp_r5_core *r5_core;
 707	struct device *dev;
 708
 709	r5_core = rproc->priv;
 710	if (!r5_core)
 711		return -EINVAL;
 712
 713	dev = r5_core->dev;
 714
 715	cluster = dev_get_drvdata(dev->parent);
 716	if (!cluster) {
 717		dev_err(dev->parent, "Invalid driver data\n");
 718		return -EINVAL;
 719	}
 720
 721	/*
 722	 * In lockstep mode TCM banks are one contiguous memory region of 256Kb
 723	 * In split mode, each TCM bank is 64Kb and not contiguous.
 724	 * We add memory carveouts accordingly.
 725	 */
 726	if (cluster->mode == SPLIT_MODE)
 727		return add_tcm_carveout_split_mode(rproc);
 728	else if (cluster->mode == LOCKSTEP_MODE)
 729		return add_tcm_carveout_lockstep_mode(rproc);
 730
 731	return -EINVAL;
 732}
 733
 734/*
 735 * zynqmp_r5_parse_fw()
 736 * @rproc: single R5 core's corresponding rproc instance
 737 * @fw: ptr to firmware to be loaded onto r5 core
 738 *
 739 * get resource table if available
 740 *
 741 * return 0 on success, otherwise non-zero value on failure
 742 */
 743static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
 744{
 745	int ret;
 746
 747	ret = rproc_elf_load_rsc_table(rproc, fw);
 748	if (ret == -EINVAL) {
 749		/*
 750		 * resource table only required for IPC.
 751		 * if not present, this is not necessarily an error;
 752		 * for example, loading r5 hello world application
 753		 * so simply inform user and keep going.
 754		 */
 755		dev_info(&rproc->dev, "no resource table found.\n");
 756		ret = 0;
 757	}
 758	return ret;
 759}
 760
 761/**
 762 * zynqmp_r5_rproc_prepare()
 763 * adds carveouts for TCM bank and reserved memory regions
 764 *
 765 * @rproc: Device node of each rproc
 766 *
 767 * Return: 0 for success else < 0 error code
 768 */
 769static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
 770{
 771	int ret;
 772
 773	ret = add_tcm_banks(rproc);
 774	if (ret) {
 775		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
 776		return ret;
 777	}
 778
 779	ret = add_mem_regions_carveout(rproc);
 780	if (ret) {
 781		dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
 782		return ret;
 783	}
 784
 785	return 0;
 786}
 787
 788/**
 789 * zynqmp_r5_rproc_unprepare()
 790 * Turns off TCM banks using power-domain id
 791 *
 792 * @rproc: Device node of each rproc
 793 *
 794 * Return: always 0
 795 */
 796static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 797{
 798	struct zynqmp_r5_core *r5_core;
 799	u32 pm_domain_id;
 800	int i;
 801
 802	r5_core = rproc->priv;
 803
 804	for (i = 0; i < r5_core->tcm_bank_count; i++) {
 805		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
 806		if (zynqmp_pm_release_node(pm_domain_id))
 807			dev_warn(r5_core->dev,
 808				 "can't turn off TCM bank 0x%x", pm_domain_id);
 809	}
 810
 811	return 0;
 812}
 813
/* remoteproc framework callbacks; ELF handling uses the generic helpers */
static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare	= zynqmp_r5_rproc_prepare,
	.unprepare	= zynqmp_r5_rproc_unprepare,
	.start		= zynqmp_r5_rproc_start,
	.stop		= zynqmp_r5_rproc_stop,
	.load		= rproc_elf_load_segments,
	.parse_fw	= zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
	.kick		= zynqmp_r5_rproc_kick,
};
 826
/**
 * zynqmp_r5_add_rproc_core()
 * Allocate and add struct rproc object for each r5f core
 * This is called for each individual r5f core
 *
 * @cdev: Device node of each r5 core
 *
 * Return: zynqmp_r5_core object for success else error code pointer
 */
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
	struct zynqmp_r5_core *r5_core;
	struct rproc *r5_rproc;
	int ret;

	/* Set up DMA mask */
	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
	if (ret)
		return ERR_PTR(ret);

	/* Allocate remoteproc instance; priv is the zynqmp_r5_core */
	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
			       &zynqmp_r5_rproc_ops,
			       NULL, sizeof(struct zynqmp_r5_core));
	if (!r5_rproc) {
		dev_err(cdev, "failed to allocate memory for rproc instance\n");
		return ERR_PTR(-ENOMEM);
	}

	/* firmware is loaded explicitly by the user, not at probe time */
	r5_rproc->auto_boot = false;
	r5_core = r5_rproc->priv;
	r5_core->dev = cdev;
	r5_core->np = dev_of_node(cdev);
	if (!r5_core->np) {
		dev_err(cdev, "can't get device node for r5 core\n");
		ret = -EINVAL;
		goto free_rproc;
	}

	/* Add R5 remoteproc core */
	ret = rproc_add(r5_rproc);
	if (ret) {
		dev_err(cdev, "failed to add r5 remoteproc\n");
		goto free_rproc;
	}

	r5_core->rproc = r5_rproc;
	return r5_core;

free_rproc:
	rproc_free(r5_rproc);
	return ERR_PTR(ret);
}
 880
/**
 * zynqmp_r5_get_tcm_node()
 * Ideally this function should parse tcm node and store information
 * in r5_core instance. For now, Hardcoded TCM information is used.
 * This approach is used as TCM bindings for system-dt is being developed
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and < 0 error code for failure.
 */
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
	const struct mem_bank_data *zynqmp_tcm_banks;
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int tcm_bank_count, tcm_node;
	int i, j;

	/* pick the hardcoded table matching the cluster mode */
	if (cluster->mode == SPLIT_MODE) {
		zynqmp_tcm_banks = zynqmp_tcm_banks_split;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_split);
	} else {
		zynqmp_tcm_banks = zynqmp_tcm_banks_lockstep;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_lockstep);
	}

	/* count per core tcm banks */
	tcm_bank_count = tcm_bank_count / cluster->core_count;

	/*
	 * r5 core 0 will use all of TCM banks in lockstep mode.
	 * In split mode, r5 core0 will use 128k and r5 core1 will use another
	 * 128k. Assign TCM banks to each core accordingly
	 */
	tcm_node = 0;
	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		for (j = 0; j < tcm_bank_count; j++) {
			/*
			 * Use pre-defined TCM reg values.
			 * Eventually this should be replaced by values
			 * parsed from dts.
			 * NOTE(review): the cast drops const from the static
			 * tables; the entries are never written through
			 * tcm_banks in this file — confirm elsewhere.
			 */
			r5_core->tcm_banks[j] =
				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
			tcm_node++;
		}

		r5_core->tcm_bank_count = tcm_bank_count;
	}

	return 0;
}
 940
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * zynqmp_r5_core_init()
 * Create and initialize zynqmp_r5_core type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
			       enum rpu_oper_mode fw_reg_val,
			       enum rpu_tcm_comb tcm_mode)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int ret, i;

	/* assign (currently hardcoded) TCM banks to each core first */
	ret = zynqmp_r5_get_tcm_node(cluster);
	if (ret < 0) {
		dev_err(dev, "can't get tcm node, err %d\n", ret);
		return ret;
	}

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];

		/*
		 * Initialize r5 cores with power-domains parsed from dts.
		 * power-domains = <&phandle id>; index 1 is the domain id.
		 */
		ret = of_property_read_u32_index(r5_core->np, "power-domains",
						 1, &r5_core->pm_domain_id);
		if (ret) {
			dev_err(dev, "failed to get power-domains property\n");
			return ret;
		}

		ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
		if (ret) {
			dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
				cluster->mode, ret);
			return ret;
		}
	}

	return 0;
}
 986
 987/*
 988 * zynqmp_r5_cluster_init()
 989 * Create and initialize zynqmp_r5_cluster type object
 990 *
 991 * @cluster: pointer to zynqmp_r5_cluster type object
 992 *
 993 * Return: 0 for success and error code for failure.
 994 */
static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
{
	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
	struct device *dev = cluster->dev;
	struct device_node *dev_node = dev_of_node(dev);
	struct platform_device *child_pdev;
	struct zynqmp_r5_core **r5_cores;
	enum rpu_oper_mode fw_reg_val;
	struct device **child_devs;
	struct device_node *child;
	enum rpu_tcm_comb tcm_mode;
	int core_count, ret, i;
	struct mbox_info *ipi;

	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);

	/*
	 * on success returns 0, if not defined then returns -EINVAL,
	 * In that case, default is LOCKSTEP mode. Other than that
	 * returns relative error code < 0.
	 */
	if (ret != -EINVAL && ret != 0) {
		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
		return ret;
	}

	/*
	 * For now driver only supports split mode and lockstep mode.
	 * fail driver probe if either of that is not set in dts.
	 * Map the dt cluster mode onto the register value and the TCM
	 * combination mode the platform firmware expects.
	 */
	if (cluster_mode == LOCKSTEP_MODE) {
		tcm_mode = PM_RPU_TCM_COMB;
		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
	} else if (cluster_mode == SPLIT_MODE) {
		tcm_mode = PM_RPU_TCM_SPLIT;
		fw_reg_val = PM_RPU_MODE_SPLIT;
	} else {
		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
		return -EINVAL;
	}

	/*
	 * Number of cores is decided by number of child nodes of
	 * r5f subsystem node in dts. If Split mode is used in dts
	 * 2 child nodes are expected.
	 * In lockstep mode if two child nodes are available,
	 * only use first child node and consider it as core0
	 * and ignore core1 dt node.
	 */
	core_count = of_get_available_child_count(dev_node);
	if (core_count == 0) {
		dev_err(dev, "Invalid number of r5 cores %d", core_count);
		return -EINVAL;
	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
		dev_err(dev, "Invalid number of r5 cores for split mode\n");
		return -EINVAL;
	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
		dev_warn(dev, "Only r5 core0 will be used\n");
		core_count = 1;
	}

	/* kcalloc zero-fills: unfilled slots stay NULL for the unwind below */
	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
	if (!child_devs)
		return -ENOMEM;

	r5_cores = kcalloc(core_count,
			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
	if (!r5_cores) {
		kfree(child_devs);
		return -ENOMEM;
	}

	/*
	 * i tracks the child currently being set up; on any failure the
	 * unwind loop below releases entries 0..i in reverse order.
	 */
	i = 0;
	for_each_available_child_of_node(dev_node, child) {
		/* takes a reference on the child's device; dropped via put_device() */
		child_pdev = of_find_device_by_node(child);
		if (!child_pdev) {
			of_node_put(child);
			ret = -ENODEV;
			goto release_r5_cores;
		}

		child_devs[i] = &child_pdev->dev;

		/* create and add remoteproc instance of type struct rproc */
		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
		if (IS_ERR(r5_cores[i])) {
			of_node_put(child);
			ret = PTR_ERR(r5_cores[i]);
			/* NULL the slot so the unwind skips rproc teardown for it */
			r5_cores[i] = NULL;
			goto release_r5_cores;
		}

		/*
		 * If mailbox nodes are disabled using "status" property then
		 * setting up mailbox channels will fail.
		 * A NULL ipi is tolerated: the core then runs without IPC.
		 */
		ipi = zynqmp_r5_setup_mbox(&child_pdev->dev);
		if (ipi) {
			r5_cores[i]->ipi = ipi;
			ipi->r5_core = r5_cores[i];
		}

		/*
		 * If two child nodes are available in dts in lockstep mode,
		 * then ignore second child node.
		 */
		if (cluster_mode == LOCKSTEP_MODE) {
			of_node_put(child);
			break;
		}

		i++;
	}

	cluster->mode = cluster_mode;
	cluster->core_count = core_count;
	cluster->r5_cores = r5_cores;

	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
	if (ret < 0) {
		dev_err(dev, "failed to init r5 core err %d\n", ret);
		/* detach from cluster so no caller sees half-initialized cores */
		cluster->core_count = 0;
		cluster->r5_cores = NULL;

		/*
		 * at this point rproc resources for each core are allocated.
		 * adjust index to free resources in reverse order
		 */
		i = core_count - 1;
		goto release_r5_cores;
	}

	kfree(child_devs);
	return 0;

release_r5_cores:
	/* put_device(NULL) is a no-op, so never-filled child_devs slots are safe */
	while (i >= 0) {
		put_device(child_devs[i]);
		if (r5_cores[i]) {
			zynqmp_r5_free_mbox(r5_cores[i]->ipi);
			of_reserved_mem_device_release(r5_cores[i]->dev);
			rproc_del(r5_cores[i]->rproc);
			rproc_free(r5_cores[i]->rproc);
		}
		i--;
	}
	kfree(r5_cores);
	kfree(child_devs);
	return ret;
}
1145
1146static void zynqmp_r5_cluster_exit(void *data)
1147{
1148	struct platform_device *pdev = data;
1149	struct zynqmp_r5_cluster *cluster;
1150	struct zynqmp_r5_core *r5_core;
1151	int i;
1152
1153	cluster = platform_get_drvdata(pdev);
1154	if (!cluster)
1155		return;
1156
1157	for (i = 0; i < cluster->core_count; i++) {
1158		r5_core = cluster->r5_cores[i];
1159		zynqmp_r5_free_mbox(r5_core->ipi);
1160		of_reserved_mem_device_release(r5_core->dev);
1161		put_device(r5_core->dev);
1162		rproc_del(r5_core->rproc);
1163		rproc_free(r5_core->rproc);
1164	}
1165
1166	kfree(cluster->r5_cores);
1167	kfree(cluster);
1168	platform_set_drvdata(pdev, NULL);
1169}
1170
1171/*
1172 * zynqmp_r5_remoteproc_probe()
1173 * parse device-tree, initialize hardware and allocate required resources
1174 * and remoteproc ops
1175 *
1176 * @pdev: domain platform device for R5 cluster
1177 *
1178 * Return: 0 for success and < 0 for failure.
1179 */
1180static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
1181{
1182	struct zynqmp_r5_cluster *cluster;
1183	struct device *dev = &pdev->dev;
1184	int ret;
1185
1186	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
1187	if (!cluster)
1188		return -ENOMEM;
1189
1190	cluster->dev = dev;
1191
1192	ret = devm_of_platform_populate(dev);
1193	if (ret) {
1194		dev_err_probe(dev, ret, "failed to populate platform dev\n");
1195		kfree(cluster);
1196		return ret;
1197	}
1198
1199	/* wire in so each core can be cleaned up at driver remove */
1200	platform_set_drvdata(pdev, cluster);
1201
1202	ret = zynqmp_r5_cluster_init(cluster);
1203	if (ret) {
1204		kfree(cluster);
1205		platform_set_drvdata(pdev, NULL);
1206		dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
1207		return ret;
1208	}
1209
1210	ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
1211	if (ret)
1212		return ret;
1213
1214	return 0;
1215}
1216
/* Match table for OF platform binding: one r5f subsystem (cluster) node */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
	{ .compatible = "xlnx,zynqmp-r5fss", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
1223
/* Platform driver for the whole R5F subsystem; no .remove needed — cleanup
 * is handled by the devm action registered in probe.
 */
static struct platform_driver zynqmp_r5_remoteproc_driver = {
	.probe = zynqmp_r5_remoteproc_probe,
	.driver = {
		.name = "zynqmp_r5_remoteproc",
		.of_match_table = zynqmp_r5_remoteproc_match,
	},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);

MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ZynqMP R5 Remote Processor driver
   4 *
   5 */
   6
   7#include <dt-bindings/power/xlnx-zynqmp-power.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/firmware/xlnx-zynqmp.h>
  10#include <linux/kernel.h>
 
 
  11#include <linux/module.h>
  12#include <linux/of_address.h>
  13#include <linux/of_platform.h>
  14#include <linux/of_reserved_mem.h>
  15#include <linux/platform_device.h>
  16#include <linux/remoteproc.h>
  17#include <linux/slab.h>
  18
  19#include "remoteproc_internal.h"
  20
 
 
 
 
 
 
/*
 * settings for RPU cluster mode which
 * reflects possible values of xlnx,cluster-mode dt-property
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* When cores run as separate processor */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep,clk-for-clk */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};
  30
/**
 * struct mem_bank_data - Memory Bank description
 *
 * @addr: Start address of memory bank
 * @size: Size of Memory bank
 * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
 * @bank_name: name of the bank for remoteproc framework
 */
struct mem_bank_data {
	phys_addr_t addr;
	size_t size;
	u32 pm_domain_id;
	char *bank_name;
};
  45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Hardcoded TCM bank values. This will be removed once TCM bindings are
 * accepted for system-dt specifications and upstreamed in linux kernel
 */
static const struct mem_bank_data zynqmp_tcm_banks[] = {
	{0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};
  56
/**
 * struct zynqmp_r5_core - per-core state for one R5F processor
 *
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rmem_count: Number of reserved mem regions
 * @rmem: reserved memory region nodes from device tree
 * @rproc: rproc handle
 * @pm_domain_id: RPU CPU power domain id
 */
struct zynqmp_r5_core {
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	int rmem_count;
	struct reserved_mem **rmem;
	struct rproc *rproc;
	u32 pm_domain_id;
};
  79
/**
 * struct zynqmp_r5_cluster - state for the whole r5f subsystem
 *
 * @dev: r5f subsystem cluster device node
 * @mode: cluster mode of type zynqmp_r5_cluster_mode
 * @core_count: number of r5 cores used for this cluster mode
 * @r5_cores: Array of pointers pointing to r5 core
 */
struct zynqmp_r5_cluster {
	struct device *dev;
	enum  zynqmp_r5_cluster_mode mode;
	int core_count;
	struct zynqmp_r5_core **r5_cores;
};
  94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  95/*
  96 * zynqmp_r5_set_mode()
  97 *
  98 * set RPU cluster and TCM operation mode
  99 *
 100 * @r5_core: pointer to zynqmp_r5_core type object
 101 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 102 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 103 *
 104 * Return: 0 for success and < 0 for failure
 105 */
 106static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
 107			      enum rpu_oper_mode fw_reg_val,
 108			      enum rpu_tcm_comb tcm_mode)
 109{
 110	int ret;
 111
 112	ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
 113	if (ret < 0) {
 114		dev_err(r5_core->dev, "failed to set RPU mode\n");
 115		return ret;
 116	}
 117
 118	ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
 119	if (ret < 0)
 120		dev_err(r5_core->dev, "failed to configure TCM\n");
 121
 122	return ret;
 123}
 124
 125/*
 126 * zynqmp_r5_rproc_start()
 127 * @rproc: single R5 core's corresponding rproc instance
 128 *
 129 * Start R5 Core from designated boot address.
 130 *
 131 * return 0 on success, otherwise non-zero value on failure
 132 */
 133static int zynqmp_r5_rproc_start(struct rproc *rproc)
 134{
 135	struct zynqmp_r5_core *r5_core = rproc->priv;
 136	enum rpu_boot_mem bootmem;
 137	int ret;
 138
 139	/*
 140	 * The exception vector pointers (EVP) refer to the base-address of
 141	 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
 142	 * starts at the base-address and subsequent vectors are on 4-byte
 143	 * boundaries.
 144	 *
 145	 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
 146	 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory)
 147	 *
 148	 * Usually firmware will put Exception vectors at LOVEC.
 149	 *
 150	 * It is not recommend that you change the exception vector.
 151	 * Changing the EVP to HIVEC will result in increased interrupt latency
 152	 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor
 153	 * is non-secured, then the Cortex-R5F processor cannot access the
 154	 * HIVEC exception vectors in the OCM.
 155	 */
 156	bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
 157		   PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;
 158
 159	dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
 160		bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
 161
 162	ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
 163				     bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
 164	if (ret)
 165		dev_err(r5_core->dev,
 166			"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);
 167	return ret;
 168}
 169
 170/*
 171 * zynqmp_r5_rproc_stop()
 172 * @rproc: single R5 core's corresponding rproc instance
 173 *
 174 * Power down  R5 Core.
 175 *
 176 * return 0 on success, otherwise non-zero value on failure
 177 */
 178static int zynqmp_r5_rproc_stop(struct rproc *rproc)
 179{
 180	struct zynqmp_r5_core *r5_core = rproc->priv;
 181	int ret;
 182
 183	ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
 184				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
 185	if (ret)
 186		dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
 187
 188	return ret;
 189}
 190
 191/*
 192 * zynqmp_r5_mem_region_map()
 193 * @rproc: single R5 core's corresponding rproc instance
 194 * @mem: mem descriptor to map reserved memory-regions
 195 *
 196 * Callback to map va for memory-region's carveout.
 197 *
 198 * return 0 on success, otherwise non-zero value on failure
 199 */
 200static int zynqmp_r5_mem_region_map(struct rproc *rproc,
 201				    struct rproc_mem_entry *mem)
 202{
 203	void __iomem *va;
 204
 205	va = ioremap_wc(mem->dma, mem->len);
 206	if (IS_ERR_OR_NULL(va))
 207		return -ENOMEM;
 208
 209	mem->va = (void *)va;
 210
 211	return 0;
 212}
 213
 214/*
 215 * zynqmp_r5_rproc_mem_unmap
 216 * @rproc: single R5 core's corresponding rproc instance
 217 * @mem: mem entry to unmap
 218 *
 219 * Unmap memory-region carveout
 220 *
 221 * return: always returns 0
 222 */
 223static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
 224				      struct rproc_mem_entry *mem)
 225{
 226	iounmap((void __iomem *)mem->va);
 227	return 0;
 228}
 229
 230/*
 231 * add_mem_regions_carveout()
 232 * @rproc: single R5 core's corresponding rproc instance
 233 *
 234 * Construct rproc mem carveouts from memory-region property nodes
 235 *
 236 * return 0 on success, otherwise non-zero value on failure
 237 */
 238static int add_mem_regions_carveout(struct rproc *rproc)
 239{
 240	struct rproc_mem_entry *rproc_mem;
 241	struct zynqmp_r5_core *r5_core;
 
 242	struct reserved_mem *rmem;
 243	int i, num_mem_regions;
 244
 245	r5_core = (struct zynqmp_r5_core *)rproc->priv;
 246	num_mem_regions = r5_core->rmem_count;
 247
 248	for (i = 0; i < num_mem_regions; i++) {
 249		rmem = r5_core->rmem[i];
 250
 251		if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) {
 
 
 
 
 
 
 
 
 252			/* Init reserved memory for vdev buffer */
 253			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
 254								 rmem->size,
 255								 rmem->base,
 256								 rmem->name);
 257		} else {
 258			/* Register associated reserved memory regions */
 259			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
 260							 (dma_addr_t)rmem->base,
 261							 rmem->size, rmem->base,
 262							 zynqmp_r5_mem_region_map,
 263							 zynqmp_r5_mem_region_unmap,
 264							 rmem->name);
 265		}
 266
 267		if (!rproc_mem)
 
 268			return -ENOMEM;
 
 269
 270		rproc_add_carveout(rproc, rproc_mem);
 271
 272		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
 273			rmem->name, rmem->base, rmem->size);
 
 274	}
 275
 276	return 0;
 277}
 278
 279/*
 280 * tcm_mem_unmap()
 281 * @rproc: single R5 core's corresponding rproc instance
 282 * @mem: tcm mem entry to unmap
 283 *
 284 * Unmap TCM banks when powering down R5 core.
 285 *
 286 * return always 0
 287 */
 288static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
 289{
 290	iounmap((void __iomem *)mem->va);
 291
 292	return 0;
 293}
 294
 295/*
 296 * tcm_mem_map()
 297 * @rproc: single R5 core's corresponding rproc instance
 298 * @mem: tcm memory entry descriptor
 299 *
 300 * Given TCM bank entry, this func setup virtual address for TCM bank
 301 * remoteproc carveout. It also takes care of va to da address translation
 302 *
 303 * return 0 on success, otherwise non-zero value on failure
 304 */
 305static int tcm_mem_map(struct rproc *rproc,
 306		       struct rproc_mem_entry *mem)
 307{
 308	void __iomem *va;
 309
 310	va = ioremap_wc(mem->dma, mem->len);
 311	if (IS_ERR_OR_NULL(va))
 312		return -ENOMEM;
 313
 314	/* Update memory entry va */
 315	mem->va = (void *)va;
 316
 317	/* clear TCMs */
 318	memset_io(va, 0, mem->len);
 319
 320	/*
 321	 * The R5s expect their TCM banks to be at address 0x0 and 0x2000,
 322	 * while on the Linux side they are at 0xffexxxxx.
 323	 *
 324	 * Zero out the high 12 bits of the address. This will give
 325	 * expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
 326	 */
 327	mem->da &= 0x000fffff;
 328
 329	/*
 330	 * TCM Banks 1A and 1B still have to be translated.
 331	 *
 332	 * Below handle these two banks' absolute addresses (0xffe90000 and
 333	 * 0xffeb0000) and convert to the expected relative addresses
 334	 * (0x0 and 0x20000).
 335	 */
 336	if (mem->da == 0x90000 || mem->da == 0xB0000)
 337		mem->da -= 0x90000;
 338
 339	/* if translated TCM bank address is not valid report error */
 340	if (mem->da != 0x0 && mem->da != 0x20000) {
 341		dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
 342		return -EINVAL;
 343	}
 344	return 0;
 345}
 346
 347/*
 348 * add_tcm_carveout_split_mode()
 349 * @rproc: single R5 core's corresponding rproc instance
 350 *
 351 * allocate and add remoteproc carveout for TCM memory in split mode
 352 *
 353 * return 0 on success, otherwise non-zero value on failure
 354 */
/*
 * add_tcm_carveout_split_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in split mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_split_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	struct device *dev;
	u32 pm_domain_id;
	size_t bank_size;
	char *bank_name;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	dev = r5_core->dev;
	num_banks = r5_core->tcm_bank_count;

	/*
	 * Power-on Each 64KB TCM,
	 * register its address space, map and unmap functions
	 * and add carveouts accordingly
	 */
	for (i = 0; i < num_banks; i++) {
		bank_addr = r5_core->tcm_banks[i]->addr;
		bank_name = r5_core->tcm_banks[i]->bank_name;
		bank_size = r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		/* request power-domain access BEFORE registering the carveout */
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_split;
		}

		dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
			bank_name, bank_addr, bank_size);

		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, bank_addr,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			/* this bank was already powered on; release it here,
			 * the unwind loop below only covers banks < i
			 */
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm_split;
		}

		rproc_add_carveout(rproc, rproc_mem);
	}

	return 0;

release_tcm_split:
	/* If failed, Turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
 415
 416/*
 417 * add_tcm_carveout_lockstep_mode()
 418 * @rproc: single R5 core's corresponding rproc instance
 419 *
 420 * allocate and add remoteproc carveout for TCM memory in lockstep mode
 421 *
 422 * return 0 on success, otherwise non-zero value on failure
 423 */
/*
 * add_tcm_carveout_lockstep_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in lockstep mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	size_t bank_size = 0;
	struct device *dev;
	u32 pm_domain_id;
	char *bank_name;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	dev = r5_core->dev;

	/* Go through zynqmp banks for r5 node */
	num_banks = r5_core->tcm_bank_count;

	/*
	 * In lockstep mode, TCM is contiguous memory block
	 * However, each TCM block still needs to be enabled individually.
	 * So, Enable each TCM block individually, but add their size
	 * to create contiguous memory region.
	 */
	bank_addr = r5_core->tcm_banks[0]->addr;
	bank_name = r5_core->tcm_banks[0]->bank_name;

	for (i = 0; i < num_banks; i++) {
		/* accumulate sizes into one contiguous carveout */
		bank_size += r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		/* Turn on each TCM bank individually */
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_lockstep;
		}
	}

	dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
		bank_name, bank_addr, bank_size);

	/* Register TCM address range, TCM map and unmap functions */
	rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
					 bank_size, bank_addr,
					 tcm_mem_map, tcm_mem_unmap,
					 bank_name);
	if (!rproc_mem) {
		ret = -ENOMEM;
		/* i == num_banks here, so the unwind releases every bank */
		goto release_tcm_lockstep;
	}

	/* If registration is success, add carveouts */
	rproc_add_carveout(rproc, rproc_mem);

	return 0;

release_tcm_lockstep:
	/* If failed, Turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
 490
 491/*
 492 * add_tcm_banks()
 493 * @rproc: single R5 core's corresponding rproc instance
 494 *
 495 * allocate and add remoteproc carveouts for TCM memory based on cluster mode
 496 *
 497 * return 0 on success, otherwise non-zero value on failure
 498 */
 499static int add_tcm_banks(struct rproc *rproc)
 500{
 501	struct zynqmp_r5_cluster *cluster;
 502	struct zynqmp_r5_core *r5_core;
 503	struct device *dev;
 504
 505	r5_core = (struct zynqmp_r5_core *)rproc->priv;
 506	if (!r5_core)
 507		return -EINVAL;
 508
 509	dev = r5_core->dev;
 510
 511	cluster = dev_get_drvdata(dev->parent);
 512	if (!cluster) {
 513		dev_err(dev->parent, "Invalid driver data\n");
 514		return -EINVAL;
 515	}
 516
 517	/*
 518	 * In lockstep mode TCM banks are one contiguous memory region of 256Kb
 519	 * In split mode, each TCM bank is 64Kb and not contiguous.
 520	 * We add memory carveouts accordingly.
 521	 */
 522	if (cluster->mode == SPLIT_MODE)
 523		return add_tcm_carveout_split_mode(rproc);
 524	else if (cluster->mode == LOCKSTEP_MODE)
 525		return add_tcm_carveout_lockstep_mode(rproc);
 526
 527	return -EINVAL;
 528}
 529
 530/*
 531 * zynqmp_r5_parse_fw()
 532 * @rproc: single R5 core's corresponding rproc instance
 533 * @fw: ptr to firmware to be loaded onto r5 core
 534 *
 535 * get resource table if available
 536 *
 537 * return 0 on success, otherwise non-zero value on failure
 538 */
 539static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
 540{
 541	int ret;
 542
 543	ret = rproc_elf_load_rsc_table(rproc, fw);
 544	if (ret == -EINVAL) {
 545		/*
 546		 * resource table only required for IPC.
 547		 * if not present, this is not necessarily an error;
 548		 * for example, loading r5 hello world application
 549		 * so simply inform user and keep going.
 550		 */
 551		dev_info(&rproc->dev, "no resource table found.\n");
 552		ret = 0;
 553	}
 554	return ret;
 555}
 556
 557/**
 558 * zynqmp_r5_rproc_prepare()
 559 * adds carveouts for TCM bank and reserved memory regions
 560 *
 561 * @rproc: Device node of each rproc
 562 *
 563 * Return: 0 for success else < 0 error code
 564 */
 565static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
 566{
 567	int ret;
 568
 569	ret = add_tcm_banks(rproc);
 570	if (ret) {
 571		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
 572		return ret;
 573	}
 574
 575	ret = add_mem_regions_carveout(rproc);
 576	if (ret) {
 577		dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
 578		return ret;
 579	}
 580
 581	return 0;
 582}
 583
 584/**
 585 * zynqmp_r5_rproc_unprepare()
 586 * Turns off TCM banks using power-domain id
 587 *
 588 * @rproc: Device node of each rproc
 589 *
 590 * Return: always 0
 591 */
 592static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 593{
 594	struct zynqmp_r5_core *r5_core;
 595	u32 pm_domain_id;
 596	int i;
 597
 598	r5_core = (struct zynqmp_r5_core *)rproc->priv;
 599
 600	for (i = 0; i < r5_core->tcm_bank_count; i++) {
 601		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
 602		if (zynqmp_pm_release_node(pm_domain_id))
 603			dev_warn(r5_core->dev,
 604				 "can't turn off TCM bank 0x%x", pm_domain_id);
 605	}
 606
 607	return 0;
 608}
 609
/* remoteproc operations; ELF loading/parsing uses the generic helpers */
static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare	= zynqmp_r5_rproc_prepare,	/* power TCM, add carveouts */
	.unprepare	= zynqmp_r5_rproc_unprepare,	/* power TCM banks off */
	.start		= zynqmp_r5_rproc_start,
	.stop		= zynqmp_r5_rproc_stop,
	.load		= rproc_elf_load_segments,
	.parse_fw	= zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
};
 621
 622/**
 623 * zynqmp_r5_add_rproc_core()
 624 * Allocate and add struct rproc object for each r5f core
 625 * This is called for each individual r5f core
 626 *
 627 * @cdev: Device node of each r5 core
 628 *
 629 * Return: zynqmp_r5_core object for success else error code pointer
 630 */
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
	struct zynqmp_r5_core *r5_core;
	struct rproc *r5_rproc;
	int ret;

	/* Set up DMA mask */
	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Allocate remoteproc instance; the zynqmp_r5_core lives in the
	 * rproc's priv area, so its lifetime is tied to the rproc.
	 */
	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
			       &zynqmp_r5_rproc_ops,
			       NULL, sizeof(struct zynqmp_r5_core));
	if (!r5_rproc) {
		dev_err(cdev, "failed to allocate memory for rproc instance\n");
		return ERR_PTR(-ENOMEM);
	}

	/* firmware is started explicitly by userspace, never on boot */
	r5_rproc->auto_boot = false;
	r5_core = (struct zynqmp_r5_core *)r5_rproc->priv;
	r5_core->dev = cdev;
	r5_core->np = dev_of_node(cdev);
	if (!r5_core->np) {
		dev_err(cdev, "can't get device node for r5 core\n");
		ret = -EINVAL;
		goto free_rproc;
	}

	/* Add R5 remoteproc core */
	ret = rproc_add(r5_rproc);
	if (ret) {
		dev_err(cdev, "failed to add r5 remoteproc\n");
		goto free_rproc;
	}

	r5_core->rproc = r5_rproc;
	return r5_core;

free_rproc:
	rproc_free(r5_rproc);
	return ERR_PTR(ret);
}
 675
 676/**
 677 * zynqmp_r5_get_tcm_node()
 678 * Ideally this function should parse tcm node and store information
 679 * in r5_core instance. For now, Hardcoded TCM information is used.
 680 * This approach is used as TCM bindings for system-dt is being developed
 681 *
 682 * @cluster: pointer to zynqmp_r5_cluster type object
 683 *
 684 * Return: 0 for success and < 0 error code for failure.
 685 */
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int tcm_bank_count, tcm_node;
	int i, j;

	/* 4 hardcoded banks total (see zynqmp_tcm_banks) */
	tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);

	/* count per core tcm banks */
	tcm_bank_count = tcm_bank_count / cluster->core_count;

	/*
	 * r5 core 0 will use all of TCM banks in lockstep mode.
	 * In split mode, r5 core0 will use 128k and r5 core1 will use another
	 * 128k. Assign TCM banks to each core accordingly
	 */
	tcm_node = 0;
	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		/* devm-managed: freed automatically with the cluster device */
		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		for (j = 0; j < tcm_bank_count; j++) {
			/*
			 * Use pre-defined TCM reg values.
			 * Eventually this should be replaced by values
			 * parsed from dts.
			 * (cast drops const from the static table)
			 */
			r5_core->tcm_banks[j] =
				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
			tcm_node++;
		}

		r5_core->tcm_bank_count = tcm_bank_count;
	}

	return 0;
}
 728
 729/**
 730 * zynqmp_r5_get_mem_region_node()
 731 * parse memory-region property and get reserved mem regions
 732 *
 733 * @r5_core: pointer to zynqmp_r5_core type object
 734 *
 735 * Return: 0 for success and error code for failure.
 736 */
 737static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core)
 738{
 739	struct device_node *np, *rmem_np;
 740	struct reserved_mem **rmem;
 741	int res_mem_count, i;
 742	struct device *dev;
 743
 744	dev = r5_core->dev;
 745	np = r5_core->np;
 746
 747	res_mem_count = of_property_count_elems_of_size(np, "memory-region",
 748							sizeof(phandle));
 749	if (res_mem_count <= 0) {
 750		dev_warn(dev, "failed to get memory-region property %d\n",
 751			 res_mem_count);
 752		return 0;
 753	}
 754
 755	rmem = devm_kcalloc(dev, res_mem_count,
 756			    sizeof(struct reserved_mem *), GFP_KERNEL);
 757	if (!rmem)
 758		return -ENOMEM;
 759
 760	for (i = 0; i < res_mem_count; i++) {
 761		rmem_np = of_parse_phandle(np, "memory-region", i);
 762		if (!rmem_np)
 763			goto release_rmem;
 764
 765		rmem[i] = of_reserved_mem_lookup(rmem_np);
 766		if (!rmem[i]) {
 767			of_node_put(rmem_np);
 768			goto release_rmem;
 769		}
 770
 771		of_node_put(rmem_np);
 772	}
 773
 774	r5_core->rmem_count = res_mem_count;
 775	r5_core->rmem = rmem;
 776	return 0;
 777
 778release_rmem:
 779	return -EINVAL;
 780}
 781
 782/*
 783 * zynqmp_r5_core_init()
 784 * Create and initialize zynqmp_r5_core type object
 785 *
 786 * @cluster: pointer to zynqmp_r5_cluster type object
 787 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 788 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 789 *
 790 * Return: 0 for success and error code for failure.
 791 */
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
			       enum rpu_oper_mode fw_reg_val,
			       enum rpu_tcm_comb tcm_mode)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int ret, i;

	/* assign (hardcoded) TCM banks to each core first */
	ret = zynqmp_r5_get_tcm_node(cluster);
	if (ret < 0) {
		dev_err(dev, "can't get tcm node, err %d\n", ret);
		return ret;
	}

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];

		/* reserved regions are optional; warn and continue on failure */
		ret = zynqmp_r5_get_mem_region_node(r5_core);
		if (ret)
			dev_warn(dev, "memory-region prop failed %d\n", ret);

		/* Initialize r5 cores with power-domains parsed from dts */
		ret = of_property_read_u32_index(r5_core->np, "power-domains",
						 1, &r5_core->pm_domain_id);
		if (ret) {
			dev_err(dev, "failed to get power-domains property\n");
			return ret;
		}

		ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
		if (ret) {
			dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
				cluster->mode, ret);
			return ret;
		}
	}

	return 0;
}
 831
 832/*
 833 * zynqmp_r5_cluster_init()
 834 * Create and initialize zynqmp_r5_cluster type object
 835 *
 836 * @cluster: pointer to zynqmp_r5_cluster type object
 837 *
 838 * Return: 0 for success and error code for failure.
 839 */
 840static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
 841{
 842	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
 843	struct device *dev = cluster->dev;
 844	struct device_node *dev_node = dev_of_node(dev);
 845	struct platform_device *child_pdev;
 846	struct zynqmp_r5_core **r5_cores;
 847	enum rpu_oper_mode fw_reg_val;
 848	struct device **child_devs;
 849	struct device_node *child;
 850	enum rpu_tcm_comb tcm_mode;
 851	int core_count, ret, i;
 
 852
 853	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);
 854
 855	/*
 856	 * on success returns 0, if not defined then returns -EINVAL,
 857	 * In that case, default is LOCKSTEP mode. Other than that
 858	 * returns relative error code < 0.
 859	 */
 860	if (ret != -EINVAL && ret != 0) {
 861		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
 862		return ret;
 863	}
 864
 865	/*
 866	 * For now driver only supports split mode and lockstep mode.
 867	 * fail driver probe if either of that is not set in dts.
 868	 */
 869	if (cluster_mode == LOCKSTEP_MODE) {
 870		tcm_mode = PM_RPU_TCM_COMB;
 871		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
 872	} else if (cluster_mode == SPLIT_MODE) {
 873		tcm_mode = PM_RPU_TCM_SPLIT;
 874		fw_reg_val = PM_RPU_MODE_SPLIT;
 875	} else {
 876		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
 877		return -EINVAL;
 878	}
 879
 880	/*
 881	 * Number of cores is decided by number of child nodes of
 882	 * r5f subsystem node in dts. If Split mode is used in dts
 883	 * 2 child nodes are expected.
 884	 * In lockstep mode if two child nodes are available,
 885	 * only use first child node and consider it as core0
 886	 * and ignore core1 dt node.
 887	 */
 888	core_count = of_get_available_child_count(dev_node);
 889	if (core_count == 0) {
 890		dev_err(dev, "Invalid number of r5 cores %d", core_count);
 891		return -EINVAL;
 892	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
 893		dev_err(dev, "Invalid number of r5 cores for split mode\n");
 894		return -EINVAL;
 895	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
 896		dev_warn(dev, "Only r5 core0 will be used\n");
 897		core_count = 1;
 898	}
 899
 900	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
 901	if (!child_devs)
 902		return -ENOMEM;
 903
 904	r5_cores = kcalloc(core_count,
 905			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
 906	if (!r5_cores) {
 907		kfree(child_devs);
 908		return -ENOMEM;
 909	}
 910
 911	i = 0;
 912	for_each_available_child_of_node(dev_node, child) {
 913		child_pdev = of_find_device_by_node(child);
 914		if (!child_pdev) {
 915			of_node_put(child);
 916			ret = -ENODEV;
 917			goto release_r5_cores;
 918		}
 919
 920		child_devs[i] = &child_pdev->dev;
 921
 922		/* create and add remoteproc instance of type struct rproc */
 923		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
 924		if (IS_ERR(r5_cores[i])) {
 925			of_node_put(child);
 926			ret = PTR_ERR(r5_cores[i]);
 927			r5_cores[i] = NULL;
 928			goto release_r5_cores;
 929		}
 930
 931		/*
 
 
 
 
 
 
 
 
 
 
 932		 * If two child nodes are available in dts in lockstep mode,
 933		 * then ignore second child node.
 934		 */
 935		if (cluster_mode == LOCKSTEP_MODE) {
 936			of_node_put(child);
 937			break;
 938		}
 939
 940		i++;
 941	}
 942
 943	cluster->mode = cluster_mode;
 944	cluster->core_count = core_count;
 945	cluster->r5_cores = r5_cores;
 946
 947	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
 948	if (ret < 0) {
 949		dev_err(dev, "failed to init r5 core err %d\n", ret);
 950		cluster->core_count = 0;
 951		cluster->r5_cores = NULL;
 952
 953		/*
 954		 * at this point rproc resources for each core are allocated.
 955		 * adjust index to free resources in reverse order
 956		 */
 957		i = core_count - 1;
 958		goto release_r5_cores;
 959	}
 960
 961	kfree(child_devs);
 962	return 0;
 963
 964release_r5_cores:
 965	while (i >= 0) {
 966		put_device(child_devs[i]);
 967		if (r5_cores[i]) {
 
 968			of_reserved_mem_device_release(r5_cores[i]->dev);
 969			rproc_del(r5_cores[i]->rproc);
 970			rproc_free(r5_cores[i]->rproc);
 971		}
 972		i--;
 973	}
 974	kfree(r5_cores);
 975	kfree(child_devs);
 976	return ret;
 977}
 978
 979static void zynqmp_r5_cluster_exit(void *data)
 980{
 981	struct platform_device *pdev = (struct platform_device *)data;
 982	struct zynqmp_r5_cluster *cluster;
 983	struct zynqmp_r5_core *r5_core;
 984	int i;
 985
 986	cluster = (struct zynqmp_r5_cluster *)platform_get_drvdata(pdev);
 987	if (!cluster)
 988		return;
 989
 990	for (i = 0; i < cluster->core_count; i++) {
 991		r5_core = cluster->r5_cores[i];
 
 992		of_reserved_mem_device_release(r5_core->dev);
 993		put_device(r5_core->dev);
 994		rproc_del(r5_core->rproc);
 995		rproc_free(r5_core->rproc);
 996	}
 997
 998	kfree(cluster->r5_cores);
 999	kfree(cluster);
1000	platform_set_drvdata(pdev, NULL);
1001}
1002
1003/*
1004 * zynqmp_r5_remoteproc_probe()
1005 * parse device-tree, initialize hardware and allocate required resources
1006 * and remoteproc ops
1007 *
1008 * @pdev: domain platform device for R5 cluster
1009 *
1010 * Return: 0 for success and < 0 for failure.
1011 */
1012static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
1013{
1014	struct zynqmp_r5_cluster *cluster;
1015	struct device *dev = &pdev->dev;
1016	int ret;
1017
1018	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
1019	if (!cluster)
1020		return -ENOMEM;
1021
1022	cluster->dev = dev;
1023
1024	ret = devm_of_platform_populate(dev);
1025	if (ret) {
1026		dev_err_probe(dev, ret, "failed to populate platform dev\n");
1027		kfree(cluster);
1028		return ret;
1029	}
1030
1031	/* wire in so each core can be cleaned up at driver remove */
1032	platform_set_drvdata(pdev, cluster);
1033
1034	ret = zynqmp_r5_cluster_init(cluster);
1035	if (ret) {
1036		kfree(cluster);
1037		platform_set_drvdata(pdev, NULL);
1038		dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
1039		return ret;
1040	}
1041
1042	ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
1043	if (ret)
1044		return ret;
1045
1046	return 0;
1047}
1048
/* Match table for OF platform binding */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
	{ .compatible = "xlnx,zynqmp-r5fss", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);

/*
 * No .remove callback: all teardown happens in the devm action
 * (zynqmp_r5_cluster_exit) registered by the probe function.
 */
static struct platform_driver zynqmp_r5_remoteproc_driver = {
	.probe = zynqmp_r5_remoteproc_probe,
	.driver = {
		.name = "zynqmp_r5_remoteproc",
		.of_match_table = zynqmp_r5_remoteproc_match,
	},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);

MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");