v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2//
   3// Copyright (c) 2019 MediaTek Inc.
   4
   5#include <asm/barrier.h>
   6#include <linux/clk.h>
   7#include <linux/dma-mapping.h>
   8#include <linux/err.h>
   9#include <linux/interrupt.h>
  10#include <linux/kernel.h>
  11#include <linux/module.h>
  12#include <linux/of_address.h>
  13#include <linux/of_platform.h>
  14#include <linux/of_reserved_mem.h>
  15#include <linux/platform_device.h>
  16#include <linux/remoteproc.h>
  17#include <linux/remoteproc/mtk_scp.h>
  18#include <linux/rpmsg/mtk_rpmsg.h>
  19
  20#include "mtk_common.h"
  21#include "remoteproc_internal.h"
  22
  23#define MAX_CODE_SIZE 0x500000
  24#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
  25
  26/**
  27 * scp_get() - get a reference to SCP.
  28 *
  29 * @pdev:	the platform device of the module requesting SCP platform
  30 *		device for using SCP API.
  31 *
   32 * Return: NULL if failed, otherwise a reference to the SCP.
  33 **/
  34struct mtk_scp *scp_get(struct platform_device *pdev)
  35{
  36	struct device *dev = &pdev->dev;
  37	struct device_node *scp_node;
  38	struct platform_device *scp_pdev;
  39
  40	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
  41	if (!scp_node) {
  42		dev_err(dev, "can't get SCP node\n");
  43		return NULL;
  44	}
  45
  46	scp_pdev = of_find_device_by_node(scp_node);
  47	of_node_put(scp_node);
  48
  49	if (WARN_ON(!scp_pdev)) {
  50		dev_err(dev, "SCP pdev failed\n");
  51		return NULL;
  52	}
  53
  54	return platform_get_drvdata(scp_pdev);
  55}
  56EXPORT_SYMBOL_GPL(scp_get);
  57
  58/**
  59 * scp_put() - "free" the SCP
  60 *
  61 * @scp:	mtk_scp structure from scp_get().
  62 **/
  63void scp_put(struct mtk_scp *scp)
  64{
  65	put_device(scp->dev);
  66}
  67EXPORT_SYMBOL_GPL(scp_put);
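
/*
 * Editor's note — a minimal, hypothetical client sketch (not part of this
 * file): a consumer driver whose node carries a "mediatek,scp" phandle can
 * take a reference in probe and drop it in remove. All names here are
 * illustrative.
 */
static int example_client_probe(struct platform_device *pdev)
{
	struct mtk_scp *scp;

	scp = scp_get(pdev);		/* resolves the "mediatek,scp" phandle */
	if (!scp)
		return -EPROBE_DEFER;	/* SCP may not have probed yet */

	platform_set_drvdata(pdev, scp);
	return 0;
}

static void example_client_remove(struct platform_device *pdev)
{
	scp_put(platform_get_drvdata(pdev));	/* drops the device reference */
}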
  68
  69static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
  70{
  71	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
  72	struct mtk_scp *scp_node;
  73
  74	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
  75
  76	/* report watchdog timeout to all cores */
  77	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
  78		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
  79}
  80
  81static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
  82{
  83	struct mtk_scp *scp = priv;
  84	struct scp_run *run = data;
  85
  86	scp->run.signaled = run->signaled;
  87	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
  88	scp->run.dec_capability = run->dec_capability;
  89	scp->run.enc_capability = run->enc_capability;
  90	wake_up_interruptible(&scp->run.wq);
  91}
  92
  93static void scp_ipi_handler(struct mtk_scp *scp)
  94{
  95	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
  96	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
  97	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
  98	scp_ipi_handler_t handler;
  99	u32 id = readl(&rcv_obj->id);
 100	u32 len = readl(&rcv_obj->len);
 101
 102	if (len > SCP_SHARE_BUFFER_SIZE) {
 103		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
 104			SCP_SHARE_BUFFER_SIZE);
 105		return;
 106	}
 107	if (id >= SCP_IPI_MAX) {
 108		dev_err(scp->dev, "No such ipi id = %d\n", id);
 109		return;
 110	}
 111
 112	scp_ipi_lock(scp, id);
 113	handler = ipi_desc[id].handler;
 114	if (!handler) {
 115		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
 116		scp_ipi_unlock(scp, id);
 117		return;
 118	}
 119
 120	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
 121	handler(tmp_data, len, ipi_desc[id].priv);
 122	scp_ipi_unlock(scp, id);
 123
 124	scp->ipi_id_ack[id] = true;
 125	wake_up(&scp->ack_wq);
 126}
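
/*
 * Editor's note — hypothetical sketch of the receiving side this dispatcher
 * serves: a client registers a handler for an IPI id and sends a request via
 * scp_ipi_send() (both helpers are implemented in mtk_scp_ipi.c). The message
 * struct and the choice of SCP_IPI_VDEC_H264 are purely illustrative.
 */
struct example_msg {
	u32 cmd;
	u32 arg;
};

static void example_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct example_msg *msg = data;	/* copy taken from tmp_data above */

	pr_info("SCP reply: cmd=%u arg=%u\n", msg->cmd, msg->arg);
}

static int example_ipi_setup(struct mtk_scp *scp)
{
	struct example_msg msg = { .cmd = 1, .arg = 0 };
	int ret;

	ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264, example_ipi_handler, NULL);
	if (ret)
		return ret;

	/* wait up to 100 ms for the SCP to ack (see ipi_id_ack/ack_wq above) */
	return scp_ipi_send(scp, SCP_IPI_VDEC_H264, &msg, sizeof(msg), 100);
}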
 127
 128static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 129				     const struct firmware *fw,
 130				     size_t *offset);
 131
 132static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 133{
 134	int ret;
 135	size_t offset;
 136
 137	/* read the ipi buf addr from FW itself first */
 138	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
 139	if (ret) {
 140		/* use default ipi buf addr if the FW doesn't have it */
 141		offset = scp->data->ipi_buf_offset;
 142		if (!offset)
 143			return ret;
 144	}
  145	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
  146
 147	scp->recv_buf = (struct mtk_share_obj __iomem *)
 148			(scp->sram_base + offset);
 149	scp->send_buf = (struct mtk_share_obj __iomem *)
 150			(scp->sram_base + offset + sizeof(*scp->recv_buf));
 151	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
 152	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
 153
 154	return 0;
 155}
 156
 157static void mt8183_scp_reset_assert(struct mtk_scp *scp)
 158{
 159	u32 val;
 160
 161	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 162	val &= ~MT8183_SW_RSTN_BIT;
 163	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 164}
 165
 166static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
 167{
 168	u32 val;
 169
 170	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 171	val |= MT8183_SW_RSTN_BIT;
 172	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 173}
 174
 175static void mt8192_scp_reset_assert(struct mtk_scp *scp)
 176{
 177	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 178}
 179
 180static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
 181{
 182	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
 183}
 184
 185static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
 186{
 187	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
 188}
 189
 190static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
 191{
 192	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
 193}
 194
 195static void mt8183_scp_irq_handler(struct mtk_scp *scp)
 196{
 197	u32 scp_to_host;
 198
 199	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 200	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
 201		scp_ipi_handler(scp);
 202	else
 203		scp_wdt_handler(scp, scp_to_host);
 204
 205	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
 206	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
 207	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 208}
 209
 210static void mt8192_scp_irq_handler(struct mtk_scp *scp)
 211{
 212	u32 scp_to_host;
 213
 214	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 215
 216	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 217		scp_ipi_handler(scp);
 218
 219		/*
 220		 * SCP won't send another interrupt until we clear
 221		 * MT8192_SCP2APMCU_IPC.
 222		 */
 223		writel(MT8192_SCP_IPC_INT_BIT,
 224		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 225	} else {
 226		scp_wdt_handler(scp, scp_to_host);
 227		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 228	}
 229}
 230
 231static void mt8195_scp_irq_handler(struct mtk_scp *scp)
 232{
 233	u32 scp_to_host;
 234
 235	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 236
 237	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 238		scp_ipi_handler(scp);
 239	} else {
 240		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);
 241
 242		if (reason & MT8195_CORE0_WDT)
 243			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 244
 245		if (reason & MT8195_CORE1_WDT)
 246			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);
 247
 248		scp_wdt_handler(scp, reason);
 249	}
 250
 251	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 252}
 253
 254static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
 255{
 256	u32 scp_to_host;
 257
 258	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);
 259
 260	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
 261		scp_ipi_handler(scp);
 262
 263	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
 264}
 265
 266static irqreturn_t scp_irq_handler(int irq, void *priv)
 267{
 268	struct mtk_scp *scp = priv;
 269	int ret;
 270
 271	ret = clk_prepare_enable(scp->clk);
 272	if (ret) {
 273		dev_err(scp->dev, "failed to enable clocks\n");
 274		return IRQ_NONE;
 275	}
 276
 277	scp->data->scp_irq_handler(scp);
 278
 279	clk_disable_unprepare(scp->clk);
 280
 281	return IRQ_HANDLED;
 282}
 283
 284static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
 285{
 286	struct device *dev = &rproc->dev;
 287	struct elf32_hdr *ehdr;
 288	struct elf32_phdr *phdr;
 289	int i, ret = 0;
 290	const u8 *elf_data = fw->data;
 291
 292	ehdr = (struct elf32_hdr *)elf_data;
 293	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
 294
 295	/* go through the available ELF segments */
 296	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
 297		u32 da = phdr->p_paddr;
 298		u32 memsz = phdr->p_memsz;
 299		u32 filesz = phdr->p_filesz;
 300		u32 offset = phdr->p_offset;
 301		void __iomem *ptr;
 302
 303		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
 304			phdr->p_type, da, memsz, filesz);
 305
 306		if (phdr->p_type != PT_LOAD)
 307			continue;
 308		if (!filesz)
 309			continue;
 310
 311		if (filesz > memsz) {
 312			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
 313				filesz, memsz);
 314			ret = -EINVAL;
 315			break;
 316		}
 317
 318		if (offset + filesz > fw->size) {
 319			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
 320				offset + filesz, fw->size);
 321			ret = -EINVAL;
 322			break;
 323		}
 324
 325		/* grab the kernel address for this device address */
 326		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
 327		if (!ptr) {
 328			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
 329			ret = -EINVAL;
 330			break;
 331		}
 332
 333		/* put the segment where the remote processor expects it */
 334		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
 335	}
 336
 337	return ret;
 338}
 339
 340static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 341				     const struct firmware *fw,
 342				     size_t *offset)
 343{
 344	struct elf32_hdr *ehdr;
 345	struct elf32_shdr *shdr, *shdr_strtab;
 346	int i;
 347	const u8 *elf_data = fw->data;
 348	const char *strtab;
 349
 350	ehdr = (struct elf32_hdr *)elf_data;
 351	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
 352	shdr_strtab = shdr + ehdr->e_shstrndx;
 353	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
 354
 355	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
 356		if (strcmp(strtab + shdr->sh_name,
 357			   SECTION_NAME_IPI_BUFFER) == 0) {
 358			*offset = shdr->sh_addr;
 359			return 0;
 360		}
 361	}
 362
 363	return -ENOENT;
 364}
 365
 366static int mt8183_scp_clk_get(struct mtk_scp *scp)
 367{
 368	struct device *dev = scp->dev;
 369	int ret = 0;
 370
 371	scp->clk = devm_clk_get(dev, "main");
 372	if (IS_ERR(scp->clk)) {
 373		dev_err(dev, "Failed to get clock\n");
 374		ret = PTR_ERR(scp->clk);
 375	}
 376
 377	return ret;
 378}
 379
 380static int mt8192_scp_clk_get(struct mtk_scp *scp)
 381{
 382	return mt8183_scp_clk_get(scp);
 383}
 384
 385static int mt8195_scp_clk_get(struct mtk_scp *scp)
 386{
 387	scp->clk = NULL;
 388
 389	return 0;
 390}
 391
 392static int mt8183_scp_before_load(struct mtk_scp *scp)
 393{
 394	/* Clear SCP to host interrupt */
 395	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 396
 397	/* Reset clocks before loading FW */
 398	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 399	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 400
 401	/* Initialize TCM before loading FW. */
 402	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 403	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 404
 405	/* Turn on the power of SCP's SRAM before using it. */
 406	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);
 407
 408	/*
 409	 * Set I-cache and D-cache size before loading SCP FW.
 410	 * SCP SRAM logical address may change when cache size setting differs.
 411	 */
 412	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 413	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 414	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 415
 416	return 0;
 417}
 418
 419static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
 420{
 421	int i;
 422
 423	for (i = 31; i >= 0; i--)
 424		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 425	writel(0, addr);
 426}
 427
 428static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
 429{
 430	int i;
 431
 432	writel(0, addr);
 433	for (i = 0; i < 32; i++)
 434		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 435}
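
/*
 * Editor's note: both helpers above step the SRAM power-down register one
 * bank per write, presumably to limit in-rush current. Power-on walks
 * GENMASK(31, 0) down to GENMASK(0, 0) and finally 0 (all banks powered);
 * power-off walks the same ladder in reverse. Banks selected by
 * @reserved_mask are never toggled (e.g.
 * MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS below).
 */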
 436
 437static int mt8186_scp_before_load(struct mtk_scp *scp)
 438{
 439	/* Clear SCP to host interrupt */
 440	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 441
 442	/* Reset clocks before loading FW */
 443	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 444	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 445
  446	/* Turn on the power of SCP's SRAM before using it. Enable 1 block at a time. */
 447	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);
 448
 449	/* Initialize TCM before loading FW. */
 450	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 451	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 452	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
 453	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
 454
 455	/*
 456	 * Set I-cache and D-cache size before loading SCP FW.
 457	 * SCP SRAM logical address may change when cache size setting differs.
 458	 */
 459	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 460	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 461	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 462
 463	return 0;
 464}
 465
 466static int mt8192_scp_before_load(struct mtk_scp *scp)
 467{
 468	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 469	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 470
 471	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 472
 473	/* enable SRAM clock */
 474	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 475	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 476	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 477	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 478	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 479
 480	/* enable MPU for all memory regions */
 481	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 482
 483	return 0;
 484}
 485
 486static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
 487{
 488	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 489
 490	mutex_lock(&scp_cluster->cluster_lock);
 491
 492	if (scp_cluster->l2tcm_refcnt == 0) {
 493		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 494		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 495
 496		/* Power on L2TCM */
 497		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 498		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 499		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 500		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 501				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 502	}
 503
 504	scp_cluster->l2tcm_refcnt += 1;
 505
 506	mutex_unlock(&scp_cluster->cluster_lock);
 507
 508	return 0;
 509}
 510
 511static int mt8195_scp_before_load(struct mtk_scp *scp)
 512{
 513	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 514
 515	mt8195_scp_l2tcm_on(scp);
 516
 517	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 518
 519	/* enable MPU for all memory regions */
 520	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 521
 522	return 0;
 523}
 524
 525static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
 526{
 527	u32 sec_ctrl;
 528	struct mtk_scp *scp_c0;
 529	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 530
 531	scp->data->scp_reset_assert(scp);
 532
 533	mt8195_scp_l2tcm_on(scp);
 534
 535	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 536
 537	/* enable MPU for all memory regions */
 538	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
 539
 540	/*
 541	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
 542	 * on SRAM when SCP core 1 accesses SRAM.
 543	 *
  544	 * This configuration allows booting SCP core 0 and core 1 from
  545	 * different SRAM addresses, because both cores boot from the head of
  546	 * SRAM by default. This must be configured before booting SCP core 1.
  547	 *
  548	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
  549	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
  550	 * a fixed offset (L2TCM_OFFSET) is added to the address on the bus.
  551	 * The shift is transparent to software.
 552	 */
 553	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
 554	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
 555
 556	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
 557	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
 558
 559	/* enable SRAM offset when fetching instruction and data */
 560	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
 561	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
 562	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
 563
 564	return 0;
 565}
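
/*
 * Editor's note — worked example with hypothetical addresses: if core 0's
 * SRAM window starts at sram_phys 0x10500000 and core 1's at 0x10720000,
 * the code above programs L2TCM_OFFSET = 0x10720000 - 0x10500000 = 0x220000.
 * A core-1 fetch at any address below sram_size then lands 0x220000 higher
 * in the shared L2TCM, so both cores can keep booting from "address 0" of
 * their own view.
 */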
 566
 567static int scp_load(struct rproc *rproc, const struct firmware *fw)
 568{
 569	struct mtk_scp *scp = rproc->priv;
 570	struct device *dev = scp->dev;
 571	int ret;
 572
 573	ret = clk_prepare_enable(scp->clk);
 574	if (ret) {
 575		dev_err(dev, "failed to enable clocks\n");
 576		return ret;
 577	}
 578
 579	/* Hold SCP in reset while loading FW. */
 580	scp->data->scp_reset_assert(scp);
 581
 582	ret = scp->data->scp_before_load(scp);
 583	if (ret < 0)
 584		goto leave;
 585
 586	ret = scp_elf_load_segments(rproc, fw);
 587leave:
 588	clk_disable_unprepare(scp->clk);
 589
 590	return ret;
 591}
 592
 593static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
 594{
 595	struct mtk_scp *scp = rproc->priv;
 596	struct device *dev = scp->dev;
 597	int ret;
 598
 599	ret = clk_prepare_enable(scp->clk);
 600	if (ret) {
 601		dev_err(dev, "failed to enable clocks\n");
 602		return ret;
 603	}
 604
 605	ret = scp_ipi_init(scp, fw);
 606	clk_disable_unprepare(scp->clk);
 607	return ret;
 608}
 609
 610static int scp_start(struct rproc *rproc)
 611{
 612	struct mtk_scp *scp = rproc->priv;
 613	struct device *dev = scp->dev;
 614	struct scp_run *run = &scp->run;
 615	int ret;
 616
 617	ret = clk_prepare_enable(scp->clk);
 618	if (ret) {
 619		dev_err(dev, "failed to enable clocks\n");
 620		return ret;
 621	}
 622
 623	run->signaled = false;
 624
 625	scp->data->scp_reset_deassert(scp);
 626
 627	ret = wait_event_interruptible_timeout(
 628					run->wq,
 629					run->signaled,
 630					msecs_to_jiffies(2000));
 631
 632	if (ret == 0) {
  633		dev_err(dev, "timed out waiting for SCP initialization!\n");
 634		ret = -ETIME;
 635		goto stop;
 636	}
 637	if (ret == -ERESTARTSYS) {
  638		dev_err(dev, "wait for SCP was interrupted by a signal!\n");
 639		goto stop;
 640	}
 641
 642	clk_disable_unprepare(scp->clk);
 643	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
 644
 645	return 0;
 646
 647stop:
 648	scp->data->scp_reset_assert(scp);
 649	clk_disable_unprepare(scp->clk);
 650	return ret;
 651}
 652
 653static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 654{
 655	int offset;
 656
 657	if (da < scp->sram_size) {
 658		offset = da;
 659		if (offset >= 0 && (offset + len) <= scp->sram_size)
 660			return (void __force *)scp->sram_base + offset;
 661	} else if (scp->dram_size) {
 662		offset = da - scp->dma_addr;
 663		if (offset >= 0 && (offset + len) <= scp->dram_size)
 664			return scp->cpu_addr + offset;
 665	}
 666
 667	return NULL;
 668}
 669
 670static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 671{
 672	int offset;
 673
 674	if (da >= scp->sram_phys &&
 675	    (da + len) <= scp->sram_phys + scp->sram_size) {
 676		offset = da - scp->sram_phys;
 677		return (void __force *)scp->sram_base + offset;
 678	}
 679
 680	/* optional memory region */
 681	if (scp->cluster->l1tcm_size &&
 682	    da >= scp->cluster->l1tcm_phys &&
 683	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
 684		offset = da - scp->cluster->l1tcm_phys;
 685		return (void __force *)scp->cluster->l1tcm_base + offset;
 686	}
 687
 688	/* optional memory region */
 689	if (scp->dram_size &&
 690	    da >= scp->dma_addr &&
 691	    (da + len) <= scp->dma_addr + scp->dram_size) {
 692		offset = da - scp->dma_addr;
 693		return scp->cpu_addr + offset;
 694	}
 695
 696	return NULL;
 697}
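
/*
 * Editor's note — hypothetical numbers: with sram_phys = 0x10500000 and
 * sram_size = 0xa0000, a device address da = 0x10501000 hits the first
 * branch above and translates to sram_base + 0x1000; a da inside the
 * reserved DRAM window translates to cpu_addr + (da - dma_addr) instead.
 */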
 698
 699static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
 700{
 701	struct mtk_scp *scp = rproc->priv;
 702
 703	return scp->data->scp_da_to_va(scp, da, len);
 704}
 705
 706static void mt8183_scp_stop(struct mtk_scp *scp)
 707{
 708	/* Disable SCP watchdog */
 709	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
 710}
 711
 712static void mt8192_scp_stop(struct mtk_scp *scp)
 713{
 714	/* Disable SRAM clock */
 715	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 716	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 717	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 718	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 719	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 720
 721	/* Disable SCP watchdog */
 722	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 723}
 724
 725static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
 726{
 727	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 728
 729	mutex_lock(&scp_cluster->cluster_lock);
 730
 731	if (scp_cluster->l2tcm_refcnt > 0)
 732		scp_cluster->l2tcm_refcnt -= 1;
 733
 734	if (scp_cluster->l2tcm_refcnt == 0) {
 735		/* Power off L2TCM */
 736		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 737		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 738		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 739		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 740				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 741	}
 742
 743	mutex_unlock(&scp_cluster->cluster_lock);
 744}
 745
 746static void mt8195_scp_stop(struct mtk_scp *scp)
 747{
 748	mt8195_scp_l2tcm_off(scp);
 749
 750	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 751
 752	/* Disable SCP watchdog */
 753	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 754}
 755
 756static void mt8195_scp_c1_stop(struct mtk_scp *scp)
 757{
 758	mt8195_scp_l2tcm_off(scp);
 759
 760	/* Power off CPU SRAM */
 761	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 762
 763	/* Disable SCP watchdog */
 764	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
 765}
 766
 767static int scp_stop(struct rproc *rproc)
 768{
 769	struct mtk_scp *scp = rproc->priv;
 770	int ret;
 771
 772	ret = clk_prepare_enable(scp->clk);
 773	if (ret) {
 774		dev_err(scp->dev, "failed to enable clocks\n");
 775		return ret;
 776	}
 777
 778	scp->data->scp_reset_assert(scp);
 779	scp->data->scp_stop(scp);
 780	clk_disable_unprepare(scp->clk);
 781
 782	return 0;
 783}
 784
 785static const struct rproc_ops scp_ops = {
 786	.start		= scp_start,
 787	.stop		= scp_stop,
 788	.load		= scp_load,
 789	.da_to_va	= scp_da_to_va,
 790	.parse_fw	= scp_parse_fw,
 791	.sanity_check	= rproc_elf_sanity_check,
 792};
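
/*
 * Editor's note: the remoteproc core drives these ops. On rproc_boot() the
 * rough order is .sanity_check -> .parse_fw -> .load (with .da_to_va used
 * to translate segment addresses) -> .start, and .stop runs on
 * rproc_shutdown() or crash recovery. A client would typically just call:
 *
 *	ret = rproc_boot(scp_get_rproc(scp));
 */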
 793
 794/**
 795 * scp_get_device() - get device struct of SCP
 796 *
 797 * @scp:	mtk_scp structure
 798 **/
 799struct device *scp_get_device(struct mtk_scp *scp)
 800{
 801	return scp->dev;
 802}
 803EXPORT_SYMBOL_GPL(scp_get_device);
 804
 805/**
 806 * scp_get_rproc() - get rproc struct of SCP
 807 *
 808 * @scp:	mtk_scp structure
 809 **/
 810struct rproc *scp_get_rproc(struct mtk_scp *scp)
 811{
 812	return scp->rproc;
 813}
 814EXPORT_SYMBOL_GPL(scp_get_rproc);
 815
 816/**
 817 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 818 *
 819 * @scp:	mtk_scp structure
 820 *
 821 * Return: video decoder hardware capability
 822 **/
 823unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
 824{
 825	return scp->run.dec_capability;
 826}
 827EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
 828
 829/**
 830 * scp_get_venc_hw_capa() - get video encoder hardware capability
 831 *
 832 * @scp:	mtk_scp structure
 833 *
 834 * Return: video encoder hardware capability
 835 **/
 836unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
 837{
 838	return scp->run.enc_capability;
 839}
 840EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
 841
 842/**
 843 * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
 844 *
 845 * @scp:	mtk_scp structure
  846 * @mem_addr:	memory address as seen by the SCP
  847 *
  848 * Map the SCP's SRAM address,
  849 * DMEM (Data Extended Memory) address,
  850 * or working-buffer address
  851 * to a kernel virtual address.
  852 *
  853 * Return: ERR_PTR(-EINVAL) if mapping failed,
  854 * otherwise the mapped kernel virtual address
 855 **/
 856void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
 857{
 858	void *ptr;
 859
 860	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
 861	if (!ptr)
 862		return ERR_PTR(-EINVAL);
 863
 864	return ptr;
 865}
 866EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
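
/*
 * Editor's note — hypothetical usage: a codec driver that receives an
 * SCP-side buffer address over IPI could translate it like so (msg and its
 * field are illustrative):
 *
 *	void *va = scp_mapping_dm_addr(scp, msg->buf_scp_addr);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */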
 867
 868static int scp_map_memory_region(struct mtk_scp *scp)
 869{
 870	int ret;
 871
 872	ret = of_reserved_mem_device_init(scp->dev);
 873
 874	/* reserved memory is optional. */
 875	if (ret == -ENODEV) {
 876		dev_info(scp->dev, "skipping reserved memory initialization.");
 877		return 0;
 878	}
 879
 880	if (ret) {
 881		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
 882		return -ENOMEM;
 883	}
 884
 885	/* Reserved SCP code size */
 886	scp->dram_size = MAX_CODE_SIZE;
 887	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
 888					   &scp->dma_addr, GFP_KERNEL);
 889	if (!scp->cpu_addr)
 890		return -ENOMEM;
 891
 892	return 0;
 893}
 894
 895static void scp_unmap_memory_region(struct mtk_scp *scp)
 896{
 897	if (scp->dram_size == 0)
 898		return;
 899
 900	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
 901			  scp->dma_addr);
 902	of_reserved_mem_device_release(scp->dev);
 903}
 904
 905static int scp_register_ipi(struct platform_device *pdev, u32 id,
 906			    ipi_handler_t handler, void *priv)
 907{
 908	struct mtk_scp *scp = platform_get_drvdata(pdev);
 909
 910	return scp_ipi_register(scp, id, handler, priv);
 911}
 912
 913static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
 914{
 915	struct mtk_scp *scp = platform_get_drvdata(pdev);
 916
 917	scp_ipi_unregister(scp, id);
 918}
 919
 920static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
 921			unsigned int len, unsigned int wait)
 922{
 923	struct mtk_scp *scp = platform_get_drvdata(pdev);
 924
 925	return scp_ipi_send(scp, id, buf, len, wait);
 926}
 927
 928static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
 929	.send_ipi = scp_send_ipi,
 930	.register_ipi = scp_register_ipi,
 931	.unregister_ipi = scp_unregister_ipi,
 932	.ns_ipi_id = SCP_IPI_NS_SERVICE,
 933};
 934
 935static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
 936{
 937	scp->rpmsg_subdev =
 938		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
 939					      &mtk_scp_rpmsg_info);
 940	if (scp->rpmsg_subdev)
 941		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
 942}
 943
 944static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
 945{
 946	if (scp->rpmsg_subdev) {
 947		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
 948		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
 949		scp->rpmsg_subdev = NULL;
 950	}
 951}
 952
 953static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
 954				      struct mtk_scp_of_cluster *scp_cluster,
 955				      const struct mtk_scp_of_data *of_data)
 956{
 957	struct device *dev = &pdev->dev;
 958	struct device_node *np = dev->of_node;
 959	struct mtk_scp *scp;
 960	struct rproc *rproc;
 961	struct resource *res;
 962	const char *fw_name = "scp.img";
 963	int ret, i;
 964
 965	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
 966	if (ret < 0 && ret != -EINVAL)
 967		return ERR_PTR(ret);
 968
 969	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
 970	if (!rproc) {
 971		dev_err(dev, "unable to allocate remoteproc\n");
 972		return ERR_PTR(-ENOMEM);
 973	}
 974
 975	scp = rproc->priv;
 976	scp->rproc = rproc;
 977	scp->dev = dev;
 978	scp->data = of_data;
 979	scp->cluster = scp_cluster;
 980	platform_set_drvdata(pdev, scp);
 981
 982	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
 983	scp->sram_base = devm_ioremap_resource(dev, res);
 984	if (IS_ERR(scp->sram_base)) {
 985		dev_err(dev, "Failed to parse and map sram memory\n");
 986		return ERR_CAST(scp->sram_base);
 987	}
 988
 989	scp->sram_size = resource_size(res);
 990	scp->sram_phys = res->start;
 991
 992	ret = scp->data->scp_clk_get(scp);
 993	if (ret)
 994		return ERR_PTR(ret);
 995
 996	ret = scp_map_memory_region(scp);
 997	if (ret)
 998		return ERR_PTR(ret);
 999
1000	mutex_init(&scp->send_lock);
1001	for (i = 0; i < SCP_IPI_MAX; i++)
1002		mutex_init(&scp->ipi_desc[i].lock);
1003
1004	/* register SCP initialization IPI */
1005	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
1006	if (ret) {
 1007		dev_err(dev, "Failed to register SCP_IPI_INIT\n");
1008		goto release_dev_mem;
1009	}
1010
1011	init_waitqueue_head(&scp->run.wq);
1012	init_waitqueue_head(&scp->ack_wq);
1013
1014	scp_add_rpmsg_subdev(scp);
1015
1016	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
1017					scp_irq_handler, IRQF_ONESHOT,
1018					pdev->name, scp);
1019
1020	if (ret) {
1021		dev_err(dev, "failed to request irq\n");
1022		goto remove_subdev;
1023	}
1024
1025	return scp;
1026
1027remove_subdev:
1028	scp_remove_rpmsg_subdev(scp);
1029	scp_ipi_unregister(scp, SCP_IPI_INIT);
1030release_dev_mem:
1031	scp_unmap_memory_region(scp);
1032	for (i = 0; i < SCP_IPI_MAX; i++)
1033		mutex_destroy(&scp->ipi_desc[i].lock);
1034	mutex_destroy(&scp->send_lock);
1035
1036	return ERR_PTR(ret);
1037}
1038
1039static void scp_free(struct mtk_scp *scp)
1040{
1041	int i;
1042
1043	scp_remove_rpmsg_subdev(scp);
1044	scp_ipi_unregister(scp, SCP_IPI_INIT);
1045	scp_unmap_memory_region(scp);
1046	for (i = 0; i < SCP_IPI_MAX; i++)
1047		mutex_destroy(&scp->ipi_desc[i].lock);
1048	mutex_destroy(&scp->send_lock);
1049}
1050
1051static int scp_add_single_core(struct platform_device *pdev,
1052			       struct mtk_scp_of_cluster *scp_cluster)
1053{
1054	struct device *dev = &pdev->dev;
1055	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1056	struct mtk_scp *scp;
1057	int ret;
1058
1059	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
1060	if (IS_ERR(scp))
1061		return PTR_ERR(scp);
1062
1063	ret = rproc_add(scp->rproc);
1064	if (ret) {
1065		dev_err(dev, "Failed to add rproc\n");
1066		scp_free(scp);
1067		return ret;
1068	}
1069
1070	list_add_tail(&scp->elem, scp_list);
1071
1072	return 0;
1073}
1074
1075static int scp_add_multi_core(struct platform_device *pdev,
1076			      struct mtk_scp_of_cluster *scp_cluster)
1077{
1078	struct device *dev = &pdev->dev;
1079	struct device_node *np = dev_of_node(dev);
1080	struct platform_device *cpdev;
1081	struct device_node *child;
1082	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1083	const struct mtk_scp_of_data **cluster_of_data;
1084	struct mtk_scp *scp, *temp;
1085	int core_id = 0;
1086	int ret;
1087
1088	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);
1089
1090	for_each_available_child_of_node(np, child) {
1091		if (!cluster_of_data[core_id]) {
1092			ret = -EINVAL;
 1093			dev_err(dev, "Unsupported core %d\n", core_id);
1094			of_node_put(child);
1095			goto init_fail;
1096		}
1097
1098		cpdev = of_find_device_by_node(child);
1099		if (!cpdev) {
1100			ret = -ENODEV;
 1101			dev_err(dev, "No platform device found for core %d\n", core_id);
1102			of_node_put(child);
1103			goto init_fail;
1104		}
1105
1106		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
1107		put_device(&cpdev->dev);
1108		if (IS_ERR(scp)) {
1109			ret = PTR_ERR(scp);
1110			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
1111			of_node_put(child);
1112			goto init_fail;
1113		}
1114
1115		ret = rproc_add(scp->rproc);
1116		if (ret) {
1117			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
1118			of_node_put(child);
1119			scp_free(scp);
1120			goto init_fail;
1121		}
1122
1123		list_add_tail(&scp->elem, scp_list);
1124		core_id++;
1125	}
1126
1127	/*
 1128	 * Here we set the driver data of @pdev to the last @scp created. This
 1129	 * is needed because (1) scp_rproc_init() calls platform_set_drvdata()
 1130	 * on the child platform devices and (2) we need a handle to the
 1131	 * cluster list in scp_remove().
1132	 */
1133	platform_set_drvdata(pdev, scp);
1134
1135	return 0;
1136
1137init_fail:
1138	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
1139		list_del(&scp->elem);
1140		rproc_del(scp->rproc);
1141		scp_free(scp);
1142	}
1143
1144	return ret;
1145}
1146
1147static bool scp_is_single_core(struct platform_device *pdev)
1148{
1149	struct device *dev = &pdev->dev;
1150	struct device_node *np = dev_of_node(dev);
1151	struct device_node *child;
1152	int num_cores = 0;
1153
1154	for_each_child_of_node(np, child)
1155		if (of_device_is_compatible(child, "mediatek,scp-core"))
1156			num_cores++;
1157
1158	return num_cores < 2;
1159}
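
/*
 * Editor's note — illustrative devicetree shape only, not a complete
 * binding: a cluster is treated as multi-core when the SCP node has two or
 * more children compatible with "mediatek,scp-core", e.g.
 *
 *	scp {
 *		compatible = "mediatek,mt8195-scp-dual";
 *		...
 *		scp@0 { compatible = "mediatek,scp-core"; ... };
 *		scp@1 { compatible = "mediatek,scp-core"; ... };
 *	};
 */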
1160
1161static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
1162{
1163	int ret;
1164
1165	if (scp_is_single_core(pdev))
1166		ret = scp_add_single_core(pdev, scp_cluster);
1167	else
1168		ret = scp_add_multi_core(pdev, scp_cluster);
1169
1170	return ret;
1171}
1172
1173static int scp_probe(struct platform_device *pdev)
1174{
1175	struct device *dev = &pdev->dev;
1176	struct mtk_scp_of_cluster *scp_cluster;
1177	struct resource *res;
1178	int ret;
1179
1180	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
1181	if (!scp_cluster)
1182		return -ENOMEM;
1183
1184	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
1185	if (IS_ERR(scp_cluster->reg_base))
1186		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
1187				     "Failed to parse and map cfg memory\n");
1188
1189	/* l1tcm is an optional memory region */
1190	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
1191	scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
1192	if (IS_ERR(scp_cluster->l1tcm_base)) {
1193		ret = PTR_ERR(scp_cluster->l1tcm_base);
1194		if (ret != -EINVAL)
1195			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
1196
1197		scp_cluster->l1tcm_base = NULL;
1198	} else {
1199		scp_cluster->l1tcm_size = resource_size(res);
1200		scp_cluster->l1tcm_phys = res->start;
1201	}
1202
1203	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
1204	mutex_init(&scp_cluster->cluster_lock);
1205
1206	ret = devm_of_platform_populate(dev);
1207	if (ret)
1208		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
1209
1210	ret = scp_cluster_init(pdev, scp_cluster);
1211	if (ret)
1212		return ret;
1213
1214	return 0;
1215}
1216
1217static void scp_remove(struct platform_device *pdev)
1218{
1219	struct mtk_scp *scp = platform_get_drvdata(pdev);
1220	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
1221	struct mtk_scp *temp;
1222
1223	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
1224		list_del(&scp->elem);
1225		rproc_del(scp->rproc);
1226		scp_free(scp);
1227	}
1228	mutex_destroy(&scp_cluster->cluster_lock);
1229}
1230
1231static const struct mtk_scp_of_data mt8183_of_data = {
1232	.scp_clk_get = mt8183_scp_clk_get,
1233	.scp_before_load = mt8183_scp_before_load,
1234	.scp_irq_handler = mt8183_scp_irq_handler,
1235	.scp_reset_assert = mt8183_scp_reset_assert,
1236	.scp_reset_deassert = mt8183_scp_reset_deassert,
1237	.scp_stop = mt8183_scp_stop,
1238	.scp_da_to_va = mt8183_scp_da_to_va,
1239	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1240	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1241	.ipi_buf_offset = 0x7bdb0,
1242};
1243
1244static const struct mtk_scp_of_data mt8186_of_data = {
1245	.scp_clk_get = mt8195_scp_clk_get,
1246	.scp_before_load = mt8186_scp_before_load,
1247	.scp_irq_handler = mt8183_scp_irq_handler,
1248	.scp_reset_assert = mt8183_scp_reset_assert,
1249	.scp_reset_deassert = mt8183_scp_reset_deassert,
1250	.scp_stop = mt8183_scp_stop,
1251	.scp_da_to_va = mt8183_scp_da_to_va,
1252	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1253	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1254	.ipi_buf_offset = 0x3bdb0,
1255};
1256
1257static const struct mtk_scp_of_data mt8188_of_data = {
1258	.scp_clk_get = mt8195_scp_clk_get,
1259	.scp_before_load = mt8192_scp_before_load,
1260	.scp_irq_handler = mt8192_scp_irq_handler,
1261	.scp_reset_assert = mt8192_scp_reset_assert,
1262	.scp_reset_deassert = mt8192_scp_reset_deassert,
1263	.scp_stop = mt8192_scp_stop,
1264	.scp_da_to_va = mt8192_scp_da_to_va,
1265	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1266	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1267};
1268
1269static const struct mtk_scp_of_data mt8192_of_data = {
1270	.scp_clk_get = mt8192_scp_clk_get,
1271	.scp_before_load = mt8192_scp_before_load,
1272	.scp_irq_handler = mt8192_scp_irq_handler,
1273	.scp_reset_assert = mt8192_scp_reset_assert,
1274	.scp_reset_deassert = mt8192_scp_reset_deassert,
1275	.scp_stop = mt8192_scp_stop,
1276	.scp_da_to_va = mt8192_scp_da_to_va,
1277	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1278	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1279};
1280
1281static const struct mtk_scp_of_data mt8195_of_data = {
1282	.scp_clk_get = mt8195_scp_clk_get,
1283	.scp_before_load = mt8195_scp_before_load,
1284	.scp_irq_handler = mt8195_scp_irq_handler,
1285	.scp_reset_assert = mt8192_scp_reset_assert,
1286	.scp_reset_deassert = mt8192_scp_reset_deassert,
1287	.scp_stop = mt8195_scp_stop,
1288	.scp_da_to_va = mt8192_scp_da_to_va,
1289	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1290	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1291};
1292
1293static const struct mtk_scp_of_data mt8195_of_data_c1 = {
1294	.scp_clk_get = mt8195_scp_clk_get,
1295	.scp_before_load = mt8195_scp_c1_before_load,
1296	.scp_irq_handler = mt8195_scp_c1_irq_handler,
1297	.scp_reset_assert = mt8195_scp_c1_reset_assert,
1298	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
1299	.scp_stop = mt8195_scp_c1_stop,
1300	.scp_da_to_va = mt8192_scp_da_to_va,
1301	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1302	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
1303};
1304
1305static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
1306	&mt8195_of_data,
1307	&mt8195_of_data_c1,
1308	NULL
1309};
1310
1311static const struct of_device_id mtk_scp_of_match[] = {
1312	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
1313	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
1314	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
1315	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
1316	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
1317	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
1318	{},
1319};
1320MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
1321
1322static struct platform_driver mtk_scp_driver = {
1323	.probe = scp_probe,
1324	.remove_new = scp_remove,
1325	.driver = {
1326		.name = "mtk-scp",
1327		.of_match_table = mtk_scp_of_match,
1328	},
1329};
1330
1331module_platform_driver(mtk_scp_driver);
1332
1333MODULE_LICENSE("GPL v2");
1334MODULE_DESCRIPTION("MediaTek SCP control driver");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2//
   3// Copyright (c) 2019 MediaTek Inc.
   4
   5#include <asm/barrier.h>
   6#include <linux/clk.h>
   7#include <linux/dma-mapping.h>
   8#include <linux/err.h>
   9#include <linux/interrupt.h>
  10#include <linux/kernel.h>
  11#include <linux/module.h>
  12#include <linux/of_address.h>
  13#include <linux/of_platform.h>
  14#include <linux/of_reserved_mem.h>
  15#include <linux/platform_device.h>
  16#include <linux/remoteproc.h>
  17#include <linux/remoteproc/mtk_scp.h>
  18#include <linux/rpmsg/mtk_rpmsg.h>
  19
  20#include "mtk_common.h"
  21#include "remoteproc_internal.h"
  22
  23#define MAX_CODE_SIZE 0x500000
  24#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
  25
  26/**
  27 * scp_get() - get a reference to SCP.
  28 *
  29 * @pdev:	the platform device of the module requesting SCP platform
  30 *		device for using SCP API.
  31 *
  32 * Return: Return NULL if failed.  otherwise reference to SCP.
  33 **/
  34struct mtk_scp *scp_get(struct platform_device *pdev)
  35{
  36	struct device *dev = &pdev->dev;
  37	struct device_node *scp_node;
  38	struct platform_device *scp_pdev;
  39
  40	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
  41	if (!scp_node) {
  42		dev_err(dev, "can't get SCP node\n");
  43		return NULL;
  44	}
  45
  46	scp_pdev = of_find_device_by_node(scp_node);
  47	of_node_put(scp_node);
  48
  49	if (WARN_ON(!scp_pdev)) {
  50		dev_err(dev, "SCP pdev failed\n");
  51		return NULL;
  52	}
  53
  54	return platform_get_drvdata(scp_pdev);
  55}
  56EXPORT_SYMBOL_GPL(scp_get);
  57
  58/**
  59 * scp_put() - "free" the SCP
  60 *
  61 * @scp:	mtk_scp structure from scp_get().
  62 **/
  63void scp_put(struct mtk_scp *scp)
  64{
  65	put_device(scp->dev);
  66}
  67EXPORT_SYMBOL_GPL(scp_put);
  68
  69static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
  70{
  71	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
  72	struct mtk_scp *scp_node;
  73
  74	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
  75
  76	/* report watchdog timeout to all cores */
  77	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
  78		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
  79}
  80
  81static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
  82{
  83	struct mtk_scp *scp = priv;
  84	struct scp_run *run = data;
  85
  86	scp->run.signaled = run->signaled;
  87	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
  88	scp->run.dec_capability = run->dec_capability;
  89	scp->run.enc_capability = run->enc_capability;
  90	wake_up_interruptible(&scp->run.wq);
  91}
  92
  93static void scp_ipi_handler(struct mtk_scp *scp)
  94{
  95	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
  96	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
  97	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
  98	scp_ipi_handler_t handler;
  99	u32 id = readl(&rcv_obj->id);
 100	u32 len = readl(&rcv_obj->len);
 101
 102	if (len > SCP_SHARE_BUFFER_SIZE) {
 103		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
 104			SCP_SHARE_BUFFER_SIZE);
 105		return;
 106	}
 107	if (id >= SCP_IPI_MAX) {
 108		dev_err(scp->dev, "No such ipi id = %d\n", id);
 109		return;
 110	}
 111
 112	scp_ipi_lock(scp, id);
 113	handler = ipi_desc[id].handler;
 114	if (!handler) {
 115		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
 116		scp_ipi_unlock(scp, id);
 117		return;
 118	}
 119
 120	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
 121	handler(tmp_data, len, ipi_desc[id].priv);
 122	scp_ipi_unlock(scp, id);
 123
 124	scp->ipi_id_ack[id] = true;
 125	wake_up(&scp->ack_wq);
 126}
 127
 128static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 129				     const struct firmware *fw,
 130				     size_t *offset);
 131
 132static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 133{
 134	int ret;
 135	size_t buf_sz, offset;
 136
 137	/* read the ipi buf addr from FW itself first */
 138	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
 139	if (ret) {
 140		/* use default ipi buf addr if the FW doesn't have it */
 141		offset = scp->data->ipi_buf_offset;
 142		if (!offset)
 143			return ret;
 144	}
 145	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
 146
 147	/* Make sure IPI buffer fits in the L2TCM range assigned to this core */
 148	buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
 149
 150	if (scp->sram_size < buf_sz + offset) {
 151		dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
 152		return -EOVERFLOW;
 153	}
 154
 155	scp->recv_buf = (struct mtk_share_obj __iomem *)
 156			(scp->sram_base + offset);
 157	scp->send_buf = (struct mtk_share_obj __iomem *)
 158			(scp->sram_base + offset + sizeof(*scp->recv_buf));
 159	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
 160	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
 161
 162	return 0;
 163}
 164
 165static void mt8183_scp_reset_assert(struct mtk_scp *scp)
 166{
 167	u32 val;
 168
 169	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 170	val &= ~MT8183_SW_RSTN_BIT;
 171	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 172}
 173
 174static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
 175{
 176	u32 val;
 177
 178	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 179	val |= MT8183_SW_RSTN_BIT;
 180	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 181}
 182
 183static void mt8192_scp_reset_assert(struct mtk_scp *scp)
 184{
 185	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 186}
 187
 188static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
 189{
 190	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
 191}
 192
 193static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
 194{
 195	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
 196}
 197
 198static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
 199{
 200	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
 201}
 202
 203static void mt8183_scp_irq_handler(struct mtk_scp *scp)
 204{
 205	u32 scp_to_host;
 206
 207	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 208	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
 209		scp_ipi_handler(scp);
 210	else
 211		scp_wdt_handler(scp, scp_to_host);
 212
 213	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
 214	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
 215	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 216}
 217
 218static void mt8192_scp_irq_handler(struct mtk_scp *scp)
 219{
 220	u32 scp_to_host;
 221
 222	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 223
 224	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 225		scp_ipi_handler(scp);
 226
 227		/*
 228		 * SCP won't send another interrupt until we clear
 229		 * MT8192_SCP2APMCU_IPC.
 230		 */
 231		writel(MT8192_SCP_IPC_INT_BIT,
 232		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 233	} else {
 234		scp_wdt_handler(scp, scp_to_host);
 235		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 236	}
 237}
 238
 239static void mt8195_scp_irq_handler(struct mtk_scp *scp)
 240{
 241	u32 scp_to_host;
 242
 243	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 244
 245	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 246		scp_ipi_handler(scp);
 247	} else {
 248		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);
 249
 250		if (reason & MT8195_CORE0_WDT)
 251			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 252
 253		if (reason & MT8195_CORE1_WDT)
 254			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);
 255
 256		scp_wdt_handler(scp, reason);
 257	}
 258
 259	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 260}
 261
 262static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
 263{
 264	u32 scp_to_host;
 265
 266	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);
 267
 268	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
 269		scp_ipi_handler(scp);
 270
 271	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
 272}
 273
 274static irqreturn_t scp_irq_handler(int irq, void *priv)
 275{
 276	struct mtk_scp *scp = priv;
 277	int ret;
 278
 279	ret = clk_prepare_enable(scp->clk);
 280	if (ret) {
 281		dev_err(scp->dev, "failed to enable clocks\n");
 282		return IRQ_NONE;
 283	}
 284
 285	scp->data->scp_irq_handler(scp);
 286
 287	clk_disable_unprepare(scp->clk);
 288
 289	return IRQ_HANDLED;
 290}
 291
 292static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
 293{
 294	struct device *dev = &rproc->dev;
 295	struct elf32_hdr *ehdr;
 296	struct elf32_phdr *phdr;
 297	int i, ret = 0;
 298	const u8 *elf_data = fw->data;
 299
 300	ehdr = (struct elf32_hdr *)elf_data;
 301	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
 302
 303	/* go through the available ELF segments */
 304	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
 305		u32 da = phdr->p_paddr;
 306		u32 memsz = phdr->p_memsz;
 307		u32 filesz = phdr->p_filesz;
 308		u32 offset = phdr->p_offset;
 309		void __iomem *ptr;
 310
 311		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
 312			phdr->p_type, da, memsz, filesz);
 313
 314		if (phdr->p_type != PT_LOAD)
 315			continue;
 316		if (!filesz)
 317			continue;
 318
 319		if (filesz > memsz) {
 320			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
 321				filesz, memsz);
 322			ret = -EINVAL;
 323			break;
 324		}
 325
 326		if (offset + filesz > fw->size) {
 327			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
 328				offset + filesz, fw->size);
 329			ret = -EINVAL;
 330			break;
 331		}
 332
 333		/* grab the kernel address for this device address */
 334		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
 335		if (!ptr) {
 336			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
 337			ret = -EINVAL;
 338			break;
 339		}
 340
 341		/* put the segment where the remote processor expects it */
 342		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
 343	}
 344
 345	return ret;
 346}
 347
 348static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 349				     const struct firmware *fw,
 350				     size_t *offset)
 351{
 352	struct elf32_hdr *ehdr;
 353	struct elf32_shdr *shdr, *shdr_strtab;
 354	int i;
 355	const u8 *elf_data = fw->data;
 356	const char *strtab;
 357
 358	ehdr = (struct elf32_hdr *)elf_data;
 359	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
 360	shdr_strtab = shdr + ehdr->e_shstrndx;
 361	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
 362
 363	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
 364		if (strcmp(strtab + shdr->sh_name,
 365			   SECTION_NAME_IPI_BUFFER) == 0) {
 366			*offset = shdr->sh_addr;
 367			return 0;
 368		}
 369	}
 370
 371	return -ENOENT;
 372}
 373
 374static int mt8183_scp_clk_get(struct mtk_scp *scp)
 375{
 376	struct device *dev = scp->dev;
 377	int ret = 0;
 378
 379	scp->clk = devm_clk_get(dev, "main");
 380	if (IS_ERR(scp->clk)) {
 381		dev_err(dev, "Failed to get clock\n");
 382		ret = PTR_ERR(scp->clk);
 383	}
 384
 385	return ret;
 386}
 387
 388static int mt8192_scp_clk_get(struct mtk_scp *scp)
 389{
 390	return mt8183_scp_clk_get(scp);
 391}
 392
 393static int mt8195_scp_clk_get(struct mtk_scp *scp)
 394{
 395	scp->clk = NULL;
 396
 397	return 0;
 398}
 399
 400static int mt8183_scp_before_load(struct mtk_scp *scp)
 401{
 402	/* Clear SCP to host interrupt */
 403	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 404
 405	/* Reset clocks before loading FW */
 406	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 407	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 408
 409	/* Initialize TCM before loading FW. */
 410	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 411	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 412
 413	/* Turn on the power of SCP's SRAM before using it. */
 414	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);
 415
 416	/*
 417	 * Set I-cache and D-cache size before loading SCP FW.
 418	 * SCP SRAM logical address may change when cache size setting differs.
 419	 */
 420	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 421	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 422	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 423
 424	return 0;
 425}
 426
 427static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
 428{
 429	int i;
 430
 431	for (i = 31; i >= 0; i--)
 432		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 433	writel(0, addr);
 434}
 435
 436static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
 437{
 438	int i;
 439
 440	writel(0, addr);
 441	for (i = 0; i < 32; i++)
 442		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 443}
 444
 445static int mt8186_scp_before_load(struct mtk_scp *scp)
 446{
 447	/* Clear SCP to host interrupt */
 448	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 449
 450	/* Reset clocks before loading FW */
 451	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 452	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 453
 454	/* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
 455	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);
 456
 457	/* Initialize TCM before loading FW. */
 458	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 459	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 460	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
 461	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
 462
 463	/*
 464	 * Set I-cache and D-cache size before loading SCP FW.
 465	 * SCP SRAM logical address may change when cache size setting differs.
 466	 */
 467	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 468	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 469	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 470
 471	return 0;
 472}
 473
 474static int mt8192_scp_before_load(struct mtk_scp *scp)
 475{
 476	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 477	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 478
 479	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 480
 481	/* enable SRAM clock */
 482	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 483	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 484	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 485	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 486	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 487
 488	/* enable MPU for all memory regions */
 489	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 490
 491	return 0;
 492}
 493
 494static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
 495{
 496	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 497
 498	mutex_lock(&scp_cluster->cluster_lock);
 499
 500	if (scp_cluster->l2tcm_refcnt == 0) {
 501		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 502		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 503
 504		/* Power on L2TCM */
 505		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 506		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 507		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 508		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 509				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 510	}
 511
 512	scp_cluster->l2tcm_refcnt += 1;
 513
 514	mutex_unlock(&scp_cluster->cluster_lock);
 515
 516	return 0;
 517}
 518
 519static int mt8195_scp_before_load(struct mtk_scp *scp)
 520{
 521	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 522
 523	mt8195_scp_l2tcm_on(scp);
 524
 525	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 526
 527	/* enable MPU for all memory regions */
 528	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 529
 530	return 0;
 531}
 532
 533static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
 534{
 535	u32 sec_ctrl;
 536	struct mtk_scp *scp_c0;
 537	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 538
 539	scp->data->scp_reset_assert(scp);
 540
 541	mt8195_scp_l2tcm_on(scp);
 542
 543	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 544
 545	/* enable MPU for all memory regions */
 546	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
 547
 548	/*
 549	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
 550	 * on SRAM when SCP core 1 accesses SRAM.
 551	 *
  552	 * This configuration allows SCP core 0 and core 1 to boot from
  553	 * different SRAM addresses: by default, both cores boot from the
  554	 * head of SRAM. It must be programmed before booting SCP core 1.
  555	 *
  556	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
  557	 * When SCP core 1 issues an address within that range, a fixed offset
  558	 * (L2TCM_OFFSET) is added to the address on the bus.
  559	 * The shift is transparent to software.
 560	 */
 561	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
 562	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
 563
 564	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
 565	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
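	/*
	 * Worked example with hypothetical addresses: if core 0's SRAM
	 * starts at 0x10500000 and core 1's at 0x10700000, the offset
	 * programmed here is 0x00200000, so core 1 fetches that would
	 * otherwise hit the head of SRAM land 0x00200000 further in,
	 * where core 1's firmware image was loaded.
	 */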
 566
 567	/* enable SRAM offset when fetching instruction and data */
 568	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
 569	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
 570	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
 571
 572	return 0;
 573}
 574
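/*
 * rproc .load callback: with the SCP clock enabled and the core held
 * in reset, run the SoC-specific scp_before_load() setup and then copy
 * the firmware ELF segments into place with scp_elf_load_segments().
 */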
 575static int scp_load(struct rproc *rproc, const struct firmware *fw)
 576{
 577	struct mtk_scp *scp = rproc->priv;
 578	struct device *dev = scp->dev;
 579	int ret;
 580
 581	ret = clk_prepare_enable(scp->clk);
 582	if (ret) {
 583		dev_err(dev, "failed to enable clocks\n");
 584		return ret;
 585	}
 586
 587	/* Hold SCP in reset while loading FW. */
 588	scp->data->scp_reset_assert(scp);
 589
 590	ret = scp->data->scp_before_load(scp);
 591	if (ret < 0)
 592		goto leave;
 593
 594	ret = scp_elf_load_segments(rproc, fw);
 595leave:
 596	clk_disable_unprepare(scp->clk);
 597
 598	return ret;
 599}
 600
 601static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
 602{
 603	struct mtk_scp *scp = rproc->priv;
 604	struct device *dev = scp->dev;
 605	int ret;
 606
 607	ret = clk_prepare_enable(scp->clk);
 608	if (ret) {
 609		dev_err(dev, "failed to enable clocks\n");
 610		return ret;
 611	}
 612
 613	ret = scp_ipi_init(scp, fw);
 614	clk_disable_unprepare(scp->clk);
 615	return ret;
 616}
 617
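/*
 * rproc .start callback: release the core from reset, then wait up to
 * two seconds for the firmware's SCP_IPI_INIT handshake to set
 * run->signaled and wake the wait queue before declaring the SCP ready.
 */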
 618static int scp_start(struct rproc *rproc)
 619{
 620	struct mtk_scp *scp = rproc->priv;
 621	struct device *dev = scp->dev;
 622	struct scp_run *run = &scp->run;
 623	int ret;
 624
 625	ret = clk_prepare_enable(scp->clk);
 626	if (ret) {
 627		dev_err(dev, "failed to enable clocks\n");
 628		return ret;
 629	}
 630
 631	run->signaled = false;
 632
 633	scp->data->scp_reset_deassert(scp);
 634
 635	ret = wait_event_interruptible_timeout(
 636					run->wq,
 637					run->signaled,
 638					msecs_to_jiffies(2000));
 639
 640	if (ret == 0) {
  641		dev_err(dev, "timed out waiting for SCP initialization\n");
 642		ret = -ETIME;
 643		goto stop;
 644	}
 645	if (ret == -ERESTARTSYS) {
  646		dev_err(dev, "wait for SCP initialization interrupted by a signal\n");
 647		goto stop;
 648	}
 649
 650	clk_disable_unprepare(scp->clk);
 651	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
 652
 653	return 0;
 654
 655stop:
 656	scp->data->scp_reset_assert(scp);
 657	clk_disable_unprepare(scp->clk);
 658	return ret;
 659}
 660
 661static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 662{
 663	int offset;
 664
 665	if (da < scp->sram_size) {
 666		offset = da;
 667		if (offset >= 0 && (offset + len) <= scp->sram_size)
 668			return (void __force *)scp->sram_base + offset;
 669	} else if (scp->dram_size) {
 670		offset = da - scp->dma_addr;
 671		if (offset >= 0 && (offset + len) <= scp->dram_size)
 672			return scp->cpu_addr + offset;
 673	}
 674
 675	return NULL;
 676}
 677
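/*
 * Translate an SCP device address to a kernel virtual address for the
 * MT8192-family layout. Three windows are tried in turn: the core's
 * SRAM, the optional cluster L1TCM, and the optional DRAM region set
 * up in scp_map_memory_region(); anything else returns NULL.
 */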
 678static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 679{
 680	int offset;
 681
 682	if (da >= scp->sram_phys &&
 683	    (da + len) <= scp->sram_phys + scp->sram_size) {
 684		offset = da - scp->sram_phys;
 685		return (void __force *)scp->sram_base + offset;
 686	}
 687
 688	/* optional memory region */
 689	if (scp->cluster->l1tcm_size &&
 690	    da >= scp->cluster->l1tcm_phys &&
 691	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
 692		offset = da - scp->cluster->l1tcm_phys;
 693		return (void __force *)scp->cluster->l1tcm_base + offset;
 694	}
 695
 696	/* optional memory region */
 697	if (scp->dram_size &&
 698	    da >= scp->dma_addr &&
 699	    (da + len) <= scp->dma_addr + scp->dram_size) {
 700		offset = da - scp->dma_addr;
 701		return scp->cpu_addr + offset;
 702	}
 703
 704	return NULL;
 705}
 706
 707static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
 708{
 709	struct mtk_scp *scp = rproc->priv;
 710
 711	return scp->data->scp_da_to_va(scp, da, len);
 712}
 713
 714static void mt8183_scp_stop(struct mtk_scp *scp)
 715{
 716	/* Disable SCP watchdog */
 717	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
 718}
 719
 720static void mt8192_scp_stop(struct mtk_scp *scp)
 721{
 722	/* Disable SRAM clock */
 723	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 724	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 725	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 726	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 727	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 728
 729	/* Disable SCP watchdog */
 730	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 731}
 732
 733static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
 734{
 735	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 736
 737	mutex_lock(&scp_cluster->cluster_lock);
 738
 739	if (scp_cluster->l2tcm_refcnt > 0)
 740		scp_cluster->l2tcm_refcnt -= 1;
 741
 742	if (scp_cluster->l2tcm_refcnt == 0) {
 743		/* Power off L2TCM */
 744		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 745		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 746		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 747		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 748				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 749	}
 750
 751	mutex_unlock(&scp_cluster->cluster_lock);
 752}
 753
 754static void mt8195_scp_stop(struct mtk_scp *scp)
 755{
 756	mt8195_scp_l2tcm_off(scp);
 757
 758	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 759
 760	/* Disable SCP watchdog */
 761	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 762}
 763
 764static void mt8195_scp_c1_stop(struct mtk_scp *scp)
 765{
 766	mt8195_scp_l2tcm_off(scp);
 767
 768	/* Power off CPU SRAM */
 769	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 770
 771	/* Disable SCP watchdog */
 772	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
 773}
 774
 775static int scp_stop(struct rproc *rproc)
 776{
 777	struct mtk_scp *scp = rproc->priv;
 778	int ret;
 779
 780	ret = clk_prepare_enable(scp->clk);
 781	if (ret) {
 782		dev_err(scp->dev, "failed to enable clocks\n");
 783		return ret;
 784	}
 785
 786	scp->data->scp_reset_assert(scp);
 787	scp->data->scp_stop(scp);
 788	clk_disable_unprepare(scp->clk);
 789
 790	return 0;
 791}
 792
 793static const struct rproc_ops scp_ops = {
 794	.start		= scp_start,
 795	.stop		= scp_stop,
 796	.load		= scp_load,
 797	.da_to_va	= scp_da_to_va,
 798	.parse_fw	= scp_parse_fw,
 799	.sanity_check	= rproc_elf_sanity_check,
 800};
 801
 802/**
 803 * scp_get_device() - get device struct of SCP
 804 *
 805 * @scp:	mtk_scp structure
 806 **/
 807struct device *scp_get_device(struct mtk_scp *scp)
 808{
 809	return scp->dev;
 810}
 811EXPORT_SYMBOL_GPL(scp_get_device);
 812
 813/**
 814 * scp_get_rproc() - get rproc struct of SCP
 815 *
 816 * @scp:	mtk_scp structure
 817 **/
 818struct rproc *scp_get_rproc(struct mtk_scp *scp)
 819{
 820	return scp->rproc;
 821}
 822EXPORT_SYMBOL_GPL(scp_get_rproc);
 823
 824/**
 825 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 826 *
 827 * @scp:	mtk_scp structure
 828 *
 829 * Return: video decoder hardware capability
 830 **/
 831unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
 832{
 833	return scp->run.dec_capability;
 834}
 835EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
 836
 837/**
 838 * scp_get_venc_hw_capa() - get video encoder hardware capability
 839 *
 840 * @scp:	mtk_scp structure
 841 *
 842 * Return: video encoder hardware capability
 843 **/
 844unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
 845{
 846	return scp->run.enc_capability;
 847}
 848EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
 849
 850/**
 851 * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
 852 *
 853 * @scp:	mtk_scp structure
  854 * @mem_addr:	memory address from the SCP's point of view
 855 *
  856 * Map the SCP's SRAM address,
  857 * DMEM (Data Extended Memory) memory address,
  858 * or working buffer memory address
  859 * to a kernel virtual address.
  860 *
  861 * Return: ERR_PTR(-EINVAL) if the mapping failed,
  862 * otherwise the mapped kernel virtual address.
 863 **/
 864void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
 865{
 866	void *ptr;
 867
 868	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
 869	if (!ptr)
 870		return ERR_PTR(-EINVAL);
 871
 872	return ptr;
 873}
 874EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
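/*
 * Illustrative use from an SCP client driver (the message layout shown
 * here is hypothetical, not part of this driver):
 *
 *	void *va = scp_mapping_dm_addr(scp, msg->buf_addr);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */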
 875
 876static int scp_map_memory_region(struct mtk_scp *scp)
 877{
 878	int ret;
 879
 880	ret = of_reserved_mem_device_init(scp->dev);
 881
 882	/* reserved memory is optional. */
 883	if (ret == -ENODEV) {
  884		dev_info(scp->dev, "skipping reserved memory initialization\n");
 885		return 0;
 886	}
 887
 888	if (ret) {
 889		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
 890		return -ENOMEM;
 891	}
 892
  893	/* Reserve a DRAM region of MAX_CODE_SIZE bytes for the SCP code. */
 894	scp->dram_size = MAX_CODE_SIZE;
 895	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
 896					   &scp->dma_addr, GFP_KERNEL);
 897	if (!scp->cpu_addr)
 898		return -ENOMEM;
 899
 900	return 0;
 901}
 902
 903static void scp_unmap_memory_region(struct mtk_scp *scp)
 904{
 905	if (scp->dram_size == 0)
 906		return;
 907
 908	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
 909			  scp->dma_addr);
 910	of_reserved_mem_device_release(scp->dev);
 911}
 912
 913static int scp_register_ipi(struct platform_device *pdev, u32 id,
 914			    ipi_handler_t handler, void *priv)
 915{
 916	struct mtk_scp *scp = platform_get_drvdata(pdev);
 917
 918	return scp_ipi_register(scp, id, handler, priv);
 919}
 920
 921static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
 922{
 923	struct mtk_scp *scp = platform_get_drvdata(pdev);
 924
 925	scp_ipi_unregister(scp, id);
 926}
 927
 928static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
 929			unsigned int len, unsigned int wait)
 930{
 931	struct mtk_scp *scp = platform_get_drvdata(pdev);
 932
 933	return scp_ipi_send(scp, id, buf, len, wait);
 934}
 935
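/*
 * Thin wrappers adapting the SCP IPI calls to the callback signatures
 * expected by mtk_rpmsg, so the rpmsg subdevice created below can run
 * on top of SCP IPI, with SCP_IPI_NS_SERVICE carrying the rpmsg
 * name-service announcements.
 */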
 936static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
 937	.send_ipi = scp_send_ipi,
 938	.register_ipi = scp_register_ipi,
 939	.unregister_ipi = scp_unregister_ipi,
 940	.ns_ipi_id = SCP_IPI_NS_SERVICE,
 941};
 942
 943static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
 944{
 945	scp->rpmsg_subdev =
 946		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
 947					      &mtk_scp_rpmsg_info);
 948	if (scp->rpmsg_subdev)
 949		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
 950}
 951
 952static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
 953{
 954	if (scp->rpmsg_subdev) {
 955		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
 956		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
 957		scp->rpmsg_subdev = NULL;
 958	}
 959}
 960
 961static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
 962				      struct mtk_scp_of_cluster *scp_cluster,
 963				      const struct mtk_scp_of_data *of_data)
 964{
 965	struct device *dev = &pdev->dev;
 966	struct device_node *np = dev->of_node;
 967	struct mtk_scp *scp;
 968	struct rproc *rproc;
 969	struct resource *res;
 970	const char *fw_name = "scp.img";
 971	int ret, i;
 972
 973	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
 974	if (ret < 0 && ret != -EINVAL)
 975		return ERR_PTR(ret);
 976
 977	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
 978	if (!rproc) {
 979		dev_err(dev, "unable to allocate remoteproc\n");
 980		return ERR_PTR(-ENOMEM);
 981	}
 982
 983	scp = rproc->priv;
 984	scp->rproc = rproc;
 985	scp->dev = dev;
 986	scp->data = of_data;
 987	scp->cluster = scp_cluster;
 988	platform_set_drvdata(pdev, scp);
 989
 990	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
 991	scp->sram_base = devm_ioremap_resource(dev, res);
 992	if (IS_ERR(scp->sram_base)) {
 993		dev_err(dev, "Failed to parse and map sram memory\n");
 994		return ERR_CAST(scp->sram_base);
 995	}
 996
 997	scp->sram_size = resource_size(res);
 998	scp->sram_phys = res->start;
 999
1000	ret = scp->data->scp_clk_get(scp);
1001	if (ret)
1002		return ERR_PTR(ret);
1003
1004	ret = scp_map_memory_region(scp);
1005	if (ret)
1006		return ERR_PTR(ret);
1007
1008	mutex_init(&scp->send_lock);
1009	for (i = 0; i < SCP_IPI_MAX; i++)
1010		mutex_init(&scp->ipi_desc[i].lock);
1011
1012	/* register SCP initialization IPI */
1013	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
1014	if (ret) {
1015		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
1016		goto release_dev_mem;
1017	}
1018
1019	init_waitqueue_head(&scp->run.wq);
1020	init_waitqueue_head(&scp->ack_wq);
1021
1022	scp_add_rpmsg_subdev(scp);
1023
1024	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
1025					scp_irq_handler, IRQF_ONESHOT,
1026					pdev->name, scp);
1027
1028	if (ret) {
1029		dev_err(dev, "failed to request irq\n");
1030		goto remove_subdev;
1031	}
1032
1033	return scp;
1034
1035remove_subdev:
1036	scp_remove_rpmsg_subdev(scp);
1037	scp_ipi_unregister(scp, SCP_IPI_INIT);
1038release_dev_mem:
1039	scp_unmap_memory_region(scp);
1040	for (i = 0; i < SCP_IPI_MAX; i++)
1041		mutex_destroy(&scp->ipi_desc[i].lock);
1042	mutex_destroy(&scp->send_lock);
1043
1044	return ERR_PTR(ret);
1045}
1046
1047static void scp_free(struct mtk_scp *scp)
1048{
1049	int i;
1050
1051	scp_remove_rpmsg_subdev(scp);
1052	scp_ipi_unregister(scp, SCP_IPI_INIT);
1053	scp_unmap_memory_region(scp);
1054	for (i = 0; i < SCP_IPI_MAX; i++)
1055		mutex_destroy(&scp->ipi_desc[i].lock);
1056	mutex_destroy(&scp->send_lock);
1057}
1058
1059static int scp_add_single_core(struct platform_device *pdev,
1060			       struct mtk_scp_of_cluster *scp_cluster)
1061{
1062	struct device *dev = &pdev->dev;
1063	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1064	struct mtk_scp *scp;
1065	int ret;
1066
1067	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
1068	if (IS_ERR(scp))
1069		return PTR_ERR(scp);
1070
1071	ret = rproc_add(scp->rproc);
1072	if (ret) {
1073		dev_err(dev, "Failed to add rproc\n");
1074		scp_free(scp);
1075		return ret;
1076	}
1077
1078	list_add_tail(&scp->elem, scp_list);
1079
1080	return 0;
1081}
1082
1083static int scp_add_multi_core(struct platform_device *pdev,
1084			      struct mtk_scp_of_cluster *scp_cluster)
1085{
1086	struct device *dev = &pdev->dev;
1087	struct device_node *np = dev_of_node(dev);
1088	struct platform_device *cpdev;
1089	struct device_node *child;
1090	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1091	const struct mtk_scp_of_data **cluster_of_data;
1092	struct mtk_scp *scp, *temp;
1093	int core_id = 0;
1094	int ret;
1095
1096	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);
1097
1098	for_each_available_child_of_node(np, child) {
1099		if (!cluster_of_data[core_id]) {
1100			ret = -EINVAL;
 1101			dev_err(dev, "Unsupported core %d\n", core_id);
1102			of_node_put(child);
1103			goto init_fail;
1104		}
1105
1106		cpdev = of_find_device_by_node(child);
1107		if (!cpdev) {
1108			ret = -ENODEV;
 1109			dev_err(dev, "No platform device found for core %d\n", core_id);
1110			of_node_put(child);
1111			goto init_fail;
1112		}
1113
1114		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
1115		put_device(&cpdev->dev);
1116		if (IS_ERR(scp)) {
1117			ret = PTR_ERR(scp);
1118			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
1119			of_node_put(child);
1120			goto init_fail;
1121		}
1122
1123		ret = rproc_add(scp->rproc);
1124		if (ret) {
1125			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
1126			of_node_put(child);
1127			scp_free(scp);
1128			goto init_fail;
1129		}
1130
1131		list_add_tail(&scp->elem, scp_list);
1132		core_id++;
1133	}
1134
1135	/*
 1136	 * Set the driver data of @pdev to the last @scp created: this is
 1137	 * needed because (1) scp_rproc_init() calls platform_set_drvdata()
 1138	 * only on the child platform devices and (2) scp_remove() needs a
 1139	 * handle to the cluster list through @pdev's driver data.
1140	 */
1141	platform_set_drvdata(pdev, scp);
1142
1143	return 0;
1144
1145init_fail:
1146	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
1147		list_del(&scp->elem);
1148		rproc_del(scp->rproc);
1149		scp_free(scp);
1150	}
1151
1152	return ret;
1153}
1154
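/*
 * A cluster is treated as multi-core when its node has at least two
 * "mediatek,scp-core" children; single-core devicetrees have none of
 * these children and take the legacy single-core path.
 */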
1155static bool scp_is_single_core(struct platform_device *pdev)
1156{
1157	struct device *dev = &pdev->dev;
1158	struct device_node *np = dev_of_node(dev);
1159	struct device_node *child;
1160	int num_cores = 0;
1161
1162	for_each_child_of_node(np, child)
1163		if (of_device_is_compatible(child, "mediatek,scp-core"))
1164			num_cores++;
1165
1166	return num_cores < 2;
1167}
1168
1169static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
1170{
1171	int ret;
1172
1173	if (scp_is_single_core(pdev))
1174		ret = scp_add_single_core(pdev, scp_cluster);
1175	else
1176		ret = scp_add_multi_core(pdev, scp_cluster);
1177
1178	return ret;
1179}
1180
1181static int scp_probe(struct platform_device *pdev)
1182{
1183	struct device *dev = &pdev->dev;
1184	struct mtk_scp_of_cluster *scp_cluster;
1185	struct resource *res;
1186	int ret;
1187
1188	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
1189	if (!scp_cluster)
1190		return -ENOMEM;
1191
1192	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
1193	if (IS_ERR(scp_cluster->reg_base))
1194		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
1195				     "Failed to parse and map cfg memory\n");
1196
1197	/* l1tcm is an optional memory region */
1198	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
1199	scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
1200	if (IS_ERR(scp_cluster->l1tcm_base)) {
1201		ret = PTR_ERR(scp_cluster->l1tcm_base);
1202		if (ret != -EINVAL)
1203			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
1204
1205		scp_cluster->l1tcm_base = NULL;
1206	} else {
1207		scp_cluster->l1tcm_size = resource_size(res);
1208		scp_cluster->l1tcm_phys = res->start;
1209	}
1210
1211	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
1212	mutex_init(&scp_cluster->cluster_lock);
1213
1214	ret = devm_of_platform_populate(dev);
1215	if (ret)
1216		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
1217
1218	ret = scp_cluster_init(pdev, scp_cluster);
1219	if (ret)
1220		return ret;
1221
1222	return 0;
1223}
1224
1225static void scp_remove(struct platform_device *pdev)
1226{
1227	struct mtk_scp *scp = platform_get_drvdata(pdev);
1228	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
1229	struct mtk_scp *temp;
1230
1231	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
1232		list_del(&scp->elem);
1233		rproc_del(scp->rproc);
1234		scp_free(scp);
1235	}
1236	mutex_destroy(&scp_cluster->cluster_lock);
1237}
1238
1239static const struct mtk_scp_of_data mt8183_of_data = {
1240	.scp_clk_get = mt8183_scp_clk_get,
1241	.scp_before_load = mt8183_scp_before_load,
1242	.scp_irq_handler = mt8183_scp_irq_handler,
1243	.scp_reset_assert = mt8183_scp_reset_assert,
1244	.scp_reset_deassert = mt8183_scp_reset_deassert,
1245	.scp_stop = mt8183_scp_stop,
1246	.scp_da_to_va = mt8183_scp_da_to_va,
1247	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1248	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1249	.ipi_buf_offset = 0x7bdb0,
1250};
1251
1252static const struct mtk_scp_of_data mt8186_of_data = {
1253	.scp_clk_get = mt8195_scp_clk_get,
1254	.scp_before_load = mt8186_scp_before_load,
1255	.scp_irq_handler = mt8183_scp_irq_handler,
1256	.scp_reset_assert = mt8183_scp_reset_assert,
1257	.scp_reset_deassert = mt8183_scp_reset_deassert,
1258	.scp_stop = mt8183_scp_stop,
1259	.scp_da_to_va = mt8183_scp_da_to_va,
1260	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1261	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1262	.ipi_buf_offset = 0x3bdb0,
1263};
1264
1265static const struct mtk_scp_of_data mt8188_of_data = {
1266	.scp_clk_get = mt8195_scp_clk_get,
1267	.scp_before_load = mt8192_scp_before_load,
1268	.scp_irq_handler = mt8192_scp_irq_handler,
1269	.scp_reset_assert = mt8192_scp_reset_assert,
1270	.scp_reset_deassert = mt8192_scp_reset_deassert,
1271	.scp_stop = mt8192_scp_stop,
1272	.scp_da_to_va = mt8192_scp_da_to_va,
1273	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1274	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1275};
1276
1277static const struct mtk_scp_of_data mt8192_of_data = {
1278	.scp_clk_get = mt8192_scp_clk_get,
1279	.scp_before_load = mt8192_scp_before_load,
1280	.scp_irq_handler = mt8192_scp_irq_handler,
1281	.scp_reset_assert = mt8192_scp_reset_assert,
1282	.scp_reset_deassert = mt8192_scp_reset_deassert,
1283	.scp_stop = mt8192_scp_stop,
1284	.scp_da_to_va = mt8192_scp_da_to_va,
1285	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1286	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1287};
1288
1289static const struct mtk_scp_of_data mt8195_of_data = {
1290	.scp_clk_get = mt8195_scp_clk_get,
1291	.scp_before_load = mt8195_scp_before_load,
1292	.scp_irq_handler = mt8195_scp_irq_handler,
1293	.scp_reset_assert = mt8192_scp_reset_assert,
1294	.scp_reset_deassert = mt8192_scp_reset_deassert,
1295	.scp_stop = mt8195_scp_stop,
1296	.scp_da_to_va = mt8192_scp_da_to_va,
1297	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1298	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1299};
1300
1301static const struct mtk_scp_of_data mt8195_of_data_c1 = {
1302	.scp_clk_get = mt8195_scp_clk_get,
1303	.scp_before_load = mt8195_scp_c1_before_load,
1304	.scp_irq_handler = mt8195_scp_c1_irq_handler,
1305	.scp_reset_assert = mt8195_scp_c1_reset_assert,
1306	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
1307	.scp_stop = mt8195_scp_c1_stop,
1308	.scp_da_to_va = mt8192_scp_da_to_va,
1309	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1310	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
1311};
1312
1313static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
1314	&mt8195_of_data,
1315	&mt8195_of_data_c1,
1316	NULL
1317};
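/*
 * NULL-terminated per-core match data for the dual-core compatible;
 * scp_add_multi_core() indexes this array by the position of each
 * available child node and rejects cores beyond the sentinel.
 */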
1318
1319static const struct of_device_id mtk_scp_of_match[] = {
1320	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
1321	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
1322	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
1323	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
1324	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
1325	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
1326	{},
1327};
1328MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
1329
1330static struct platform_driver mtk_scp_driver = {
1331	.probe = scp_probe,
1332	.remove_new = scp_remove,
1333	.driver = {
1334		.name = "mtk-scp",
1335		.of_match_table = mtk_scp_of_match,
1336	},
1337};
1338
1339module_platform_driver(mtk_scp_driver);
1340
1341MODULE_LICENSE("GPL v2");
1342MODULE_DESCRIPTION("MediaTek SCP control driver");