// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.

#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>

#include "mtk_common.h"
#include "remoteproc_internal.h"

#define MAX_CODE_SIZE 0x500000
#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"

/**
 * scp_get() - get a reference to SCP.
 *
 * @pdev:	the platform device of the module requesting the SCP platform
 *		device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to SCP.
 **/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *scp_node;
	struct platform_device *scp_pdev;

	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
	if (!scp_node) {
		dev_err(dev, "can't get SCP node\n");
		return NULL;
	}

	scp_pdev = of_find_device_by_node(scp_node);
	of_node_put(scp_node);

	if (WARN_ON(!scp_pdev)) {
		dev_err(dev, "SCP pdev failed\n");
		return NULL;
	}

	return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);

/**
 * scp_put() - "free" the SCP
 *
 * @scp:	mtk_scp structure from scp_get().
 **/
void scp_put(struct mtk_scp *scp)
{
	put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);

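/*
 * A minimal usage sketch for a client driver (hypothetical consumer;
 * it assumes the client's DT node carries a "mediatek,scp" phandle):
 *
 *	struct mtk_scp *scp = scp_get(client_pdev);
 *
 *	if (!scp)
 *		return -EPROBE_DEFER;
 *	...
 *	scp_put(scp);	- drops the device reference taken by scp_get()
 */
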
static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *scp_node;

	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);

	/* report watchdog timeout to all cores */
	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
}

static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_scp *scp = priv;
	struct scp_run *run = data;

	scp->run.signaled = run->signaled;
	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
	scp->run.dec_capability = run->dec_capability;
	scp->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&scp->run.wq);
}

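/*
 * Dispatch one incoming IPI: read the message id and length from the
 * shared receive buffer in SRAM, copy the payload out of I/O memory,
 * then run the handler registered for that id and ack the sender.
 */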
static void scp_ipi_handler(struct mtk_scp *scp)
{
	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
	scp_ipi_handler_t handler;
	u32 id = readl(&rcv_obj->id);
	u32 len = readl(&rcv_obj->len);

	if (len > SCP_SHARE_BUFFER_SIZE) {
		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
			SCP_SHARE_BUFFER_SIZE);
		return;
	}
	if (id >= SCP_IPI_MAX) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		return;
	}

	scp_ipi_lock(scp, id);
	handler = ipi_desc[id].handler;
	if (!handler) {
		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
		scp_ipi_unlock(scp, id);
		return;
	}

	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
	handler(tmp_data, len, ipi_desc[id].priv);
	scp_ipi_unlock(scp, id);

	scp->ipi_id_ack[id] = true;
	wake_up(&scp->ack_wq);
}

 127
 128static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 129				     const struct firmware *fw,
 130				     size_t *offset);
 131
 132static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 133{
 134	int ret;
 135	size_t offset;
 
 
 136
 137	/* read the ipi buf addr from FW itself first */
 138	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
 139	if (ret) {
 140		/* use default ipi buf addr if the FW doesn't have it */
 141		offset = scp->data->ipi_buf_offset;
 142		if (!offset)
 143			return ret;
 144	}
 145	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
 146
 
 
 
 
 
 
 
 
 
 147	scp->recv_buf = (struct mtk_share_obj __iomem *)
 148			(scp->sram_base + offset);
 
 
 149	scp->send_buf = (struct mtk_share_obj __iomem *)
 150			(scp->sram_base + offset + sizeof(*scp->recv_buf));
 151	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
 152	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
 153
 154	return 0;
 155}

static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val &= ~MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val |= MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
}

static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}

static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
}

static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
}

static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);
	else
		scp_wdt_handler(scp, scp_to_host);

	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
}

static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);

		/*
		 * SCP won't send another interrupt until we clear
		 * MT8192_SCP2APMCU_IPC.
		 */
		writel(MT8192_SCP_IPC_INT_BIT,
		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
	} else {
		scp_wdt_handler(scp, scp_to_host);
		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
	}
}

static void mt8195_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);
	} else {
		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);

		if (reason & MT8195_CORE0_WDT)
			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);

		if (reason & MT8195_CORE1_WDT)
			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);

		scp_wdt_handler(scp, reason);
	}

	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
}

static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);

	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
}

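/*
 * Threaded IRQ handler. The SCP register space is only accessible while
 * the main clock is running, so the SoC-specific handler is bracketed
 * by clk_prepare_enable()/clk_disable_unprepare().
 */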
static irqreturn_t scp_irq_handler(int irq, void *priv)
{
	struct mtk_scp *scp = priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return IRQ_NONE;
	}

	scp->data->scp_irq_handler(scp);

	clk_disable_unprepare(scp->clk);

	return IRQ_HANDLED;
}

static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void __iomem *ptr;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
			phdr->p_type, da, memsz, filesz);

		if (phdr->p_type != PT_LOAD)
			continue;
		if (!filesz)
			continue;

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > fw->size) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
				offset + filesz, fw->size);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
	}

	return ret;
}

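/*
 * Scan the firmware ELF section headers for ".ipi_buffer" and return
 * its address, i.e. the SRAM offset where the shared IPI buffers live.
 */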
static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr, *shdr_strtab;
	int i;
	const u8 *elf_data = fw->data;
	const char *strtab;

	ehdr = (struct elf32_hdr *)elf_data;
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	shdr_strtab = shdr + ehdr->e_shstrndx;
	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (strcmp(strtab + shdr->sh_name,
			   SECTION_NAME_IPI_BUFFER) == 0) {
			*offset = shdr->sh_addr;
			return 0;
		}
	}

	return -ENOENT;
}

static int mt8183_scp_clk_get(struct mtk_scp *scp)
{
	struct device *dev = scp->dev;
	int ret = 0;

	scp->clk = devm_clk_get(dev, "main");
	if (IS_ERR(scp->clk)) {
		dev_err(dev, "Failed to get clock\n");
		ret = PTR_ERR(scp->clk);
	}

	return ret;
}

static int mt8192_scp_clk_get(struct mtk_scp *scp)
{
	return mt8183_scp_clk_get(scp);
}

static int mt8195_scp_clk_get(struct mtk_scp *scp)
{
	scp->clk = NULL;

	return 0;
}

static int mt8183_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);

	/* Turn on the power of SCP's SRAM before using it. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

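/*
 * Power the SRAM banks on one at a time: each write clears one more bit
 * of the power-down register (from the MSB down), presumably to limit
 * in-rush current. Bits set in @reserved_mask are excluded and always
 * written as zero. Power-off walks the same sequence in reverse.
 */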
static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
	writel(0, addr);
}

static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
	int i;

	writel(0, addr);
	for (i = 0; i < 32; i++)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
}

static int mt8186_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Turn on the power of SCP's SRAM before using it. Enable one block at a time. */
	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

static int mt8192_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

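/*
 * L2TCM is shared by both MT8195 SCP cores, so its power state is
 * refcounted under cluster_lock: the first core to come up powers it on
 * and the last core to go down (mt8195_scp_l2tcm_off) powers it off.
 */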
static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

		/* Power on L2TCM */
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	scp_cluster->l2tcm_refcnt += 1;

	mutex_unlock(&scp_cluster->cluster_lock);

	return 0;
}

static int mt8195_scp_before_load(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
{
	u32 sec_ctrl;
	struct mtk_scp *scp_c0;
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	scp->data->scp_reset_assert(scp);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);

	/*
	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
	 * on SRAM when SCP core 1 accesses SRAM.
	 *
	 * This configuration lets SCP core 0 and core 1 boot from different
	 * SRAM addresses: both cores boot from the head of SRAM by default,
	 * so the offset must be configured before booting SCP core 1.
	 *
	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
	 * a fixed offset (L2TCM_OFFSET) is added to the address on the bus.
	 * The shift action is transparent to software.
	 */
	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);

	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);

	/* enable SRAM offset when fetching instruction and data */
	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);

	return 0;
}

static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	/* Hold SCP in reset while loading FW. */
	scp->data->scp_reset_assert(scp);

	ret = scp->data->scp_before_load(scp);
	if (ret < 0)
		goto leave;

	ret = scp_elf_load_segments(rproc, fw);
leave:
	clk_disable_unprepare(scp->clk);

	return ret;
}

static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	ret = scp_ipi_init(scp, fw);
	clk_disable_unprepare(scp->clk);
	return ret;
}

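/*
 * Boot the SCP: release the core from reset, then wait up to 2 seconds
 * for the firmware to report readiness via the SCP_IPI_INIT message
 * (see scp_init_ipi_handler(), which sets run->signaled and wakes run->wq).
 */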
static int scp_start(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	struct scp_run *run = &scp->run;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	run->signaled = false;

	scp->data->scp_reset_deassert(scp);

	ret = wait_event_interruptible_timeout(
					run->wq,
					run->signaled,
					msecs_to_jiffies(2000));

	if (ret == 0) {
		dev_err(dev, "wait SCP initialization timeout!\n");
		ret = -ETIME;
		goto stop;
	}
	if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait SCP interrupted by a signal!\n");
		goto stop;
	}

	clk_disable_unprepare(scp->clk);
	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);

	return 0;

stop:
	scp->data->scp_reset_assert(scp);
	clk_disable_unprepare(scp->clk);
	return ret;
}

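/*
 * Device-address translation for the MT8183 family: addresses below
 * sram_size map into SRAM, anything else is looked up in the
 * DMA-allocated DRAM region.
 */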
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da < scp->sram_size) {
		offset = da;
		if (offset >= 0 && (offset + len) <= scp->sram_size)
			return (void __force *)scp->sram_base + offset;
	} else if (scp->dram_size) {
		offset = da - scp->dma_addr;
		if (offset >= 0 && (offset + len) <= scp->dram_size)
			return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da >= scp->sram_phys &&
	    (da + len) <= scp->sram_phys + scp->sram_size) {
		offset = da - scp->sram_phys;
		return (void __force *)scp->sram_base + offset;
	}

	/* optional memory region */
	if (scp->cluster->l1tcm_size &&
	    da >= scp->cluster->l1tcm_phys &&
	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
		offset = da - scp->cluster->l1tcm_phys;
		return (void __force *)scp->cluster->l1tcm_base + offset;
	}

	/* optional memory region */
	if (scp->dram_size &&
	    da >= scp->dma_addr &&
	    (da + len) <= scp->dma_addr + scp->dram_size) {
		offset = da - scp->dma_addr;
		return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct mtk_scp *scp = rproc->priv;

	return scp->data->scp_da_to_va(scp, da, len);
}

static void mt8183_scp_stop(struct mtk_scp *scp)
{
	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
}

static void mt8192_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt > 0)
		scp_cluster->l2tcm_refcnt -= 1;

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* Power off L2TCM */
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	mutex_unlock(&scp_cluster->cluster_lock);
}

static void mt8195_scp_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_c1_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	/* Power off CPU SRAM */
	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
}

static int scp_stop(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return ret;
	}

	scp->data->scp_reset_assert(scp);
	scp->data->scp_stop(scp);
	clk_disable_unprepare(scp->clk);

	return 0;
}

static const struct rproc_ops scp_ops = {
	.start		= scp_start,
	.stop		= scp_stop,
	.load		= scp_load,
	.da_to_va	= scp_da_to_va,
	.parse_fw	= scp_parse_fw,
	.sanity_check	= rproc_elf_sanity_check,
};

/**
 * scp_get_device() - get device struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct device *scp_get_device(struct mtk_scp *scp)
{
	return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);

/**
 * scp_get_rproc() - get rproc struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
	return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);

/**
 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video decoder hardware capability
 **/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
	return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);

/**
 * scp_get_venc_hw_capa() - get video encoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video encoder hardware capability
 **/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
	return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);

/**
 * scp_mapping_dm_addr() - Map SRAM/DRAM to a kernel virtual address
 *
 * @scp:	mtk_scp structure
 * @mem_addr:	memory address as seen by the SCP
 *
 * Map the SCP's SRAM address, DMEM (Data Extended Memory) address, or
 * working buffer address to a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if mapping failed,
 * otherwise the mapped kernel virtual address.
 **/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
	void *ptr;

	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
	if (!ptr)
		return ERR_PTR(-EINVAL);

	return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);

static int scp_map_memory_region(struct mtk_scp *scp)
{
	int ret;

	ret = of_reserved_mem_device_init(scp->dev);

	/* reserved memory is optional. */
	if (ret == -ENODEV) {
		dev_info(scp->dev, "skipping reserved memory initialization.");
		return 0;
	}

	if (ret) {
		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
		return -ENOMEM;
	}

	/* Reserved SCP code size */
	scp->dram_size = MAX_CODE_SIZE;
	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
					   &scp->dma_addr, GFP_KERNEL);
	if (!scp->cpu_addr)
		return -ENOMEM;

	return 0;
}

static void scp_unmap_memory_region(struct mtk_scp *scp)
{
	if (scp->dram_size == 0)
		return;

	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
			  scp->dma_addr);
	of_reserved_mem_device_release(scp->dev);
}

static int scp_register_ipi(struct platform_device *pdev, u32 id,
			    ipi_handler_t handler, void *priv)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_register(scp, id, handler, priv);
}

static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	scp_ipi_unregister(scp, id);
}

static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
			unsigned int len, unsigned int wait)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_send(scp, id, buf, len, wait);
}

static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
	.send_ipi = scp_send_ipi,
	.register_ipi = scp_register_ipi,
	.unregister_ipi = scp_unregister_ipi,
	.ns_ipi_id = SCP_IPI_NS_SERVICE,
};

static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
	scp->rpmsg_subdev =
		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
					      &mtk_scp_rpmsg_info);
	if (scp->rpmsg_subdev)
		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}

static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
	if (scp->rpmsg_subdev) {
		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
		scp->rpmsg_subdev = NULL;
	}
}

static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
				      struct mtk_scp_of_cluster *scp_cluster,
				      const struct mtk_scp_of_data *of_data)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_scp *scp;
	struct rproc *rproc;
	struct resource *res;
	const char *fw_name = "scp.img";
	int ret, i;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ERR_PTR(ret);

	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
	if (!rproc) {
		dev_err(dev, "unable to allocate remoteproc\n");
		return ERR_PTR(-ENOMEM);
	}

	scp = rproc->priv;
	scp->rproc = rproc;
	scp->dev = dev;
	scp->data = of_data;
	scp->cluster = scp_cluster;
	platform_set_drvdata(pdev, scp);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	scp->sram_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp->sram_base)) {
		dev_err(dev, "Failed to parse and map sram memory\n");
		return ERR_CAST(scp->sram_base);
	}

	scp->sram_size = resource_size(res);
	scp->sram_phys = res->start;

	ret = scp->data->scp_clk_get(scp);
	if (ret)
		return ERR_PTR(ret);

	ret = scp_map_memory_region(scp);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&scp->send_lock);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_init(&scp->ipi_desc[i].lock);

	/* register SCP initialization IPI */
	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
	if (ret) {
		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
		goto release_dev_mem;
	}

	init_waitqueue_head(&scp->run.wq);
	init_waitqueue_head(&scp->ack_wq);

	scp_add_rpmsg_subdev(scp);

	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
					scp_irq_handler, IRQF_ONESHOT,
					pdev->name, scp);

	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto remove_subdev;
	}

	return scp;

remove_subdev:
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);

	return ERR_PTR(ret);
}

static void scp_free(struct mtk_scp *scp)
{
	int i;

	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);
}

static int scp_add_single_core(struct platform_device *pdev,
			       struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	struct mtk_scp *scp;
	int ret;

	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
	if (IS_ERR(scp))
		return PTR_ERR(scp);

	ret = rproc_add(scp->rproc);
	if (ret) {
		dev_err(dev, "Failed to add rproc\n");
		scp_free(scp);
		return ret;
	}

	list_add_tail(&scp->elem, scp_list);

	return 0;
}

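/*
 * Bring up one rproc per available child node of the cluster; on any
 * failure, tear down the cores that were already added, in reverse
 * order.
 */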
static int scp_add_multi_core(struct platform_device *pdev,
			      struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct platform_device *cpdev;
	struct device_node *child;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	const struct mtk_scp_of_data **cluster_of_data;
	struct mtk_scp *scp, *temp;
	int core_id = 0;
	int ret;

	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);

	for_each_available_child_of_node(np, child) {
		if (!cluster_of_data[core_id]) {
			ret = -EINVAL;
			dev_err(dev, "Unsupported core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "No platform device found for core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
		put_device(&cpdev->dev);
		if (IS_ERR(scp)) {
			ret = PTR_ERR(scp);
			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		ret = rproc_add(scp->rproc);
		if (ret) {
			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
			of_node_put(child);
			scp_free(scp);
			goto init_fail;
		}

		list_add_tail(&scp->elem, scp_list);
		core_id++;
	}

	/*
	 * Set the driver data of @pdev to the last @scp that was created.
	 * This is needed because (1) scp_rproc_init() calls
	 * platform_set_drvdata() on the child platform devices and (2)
	 * scp_remove() needs a handle to the cluster list.
	 */
	platform_set_drvdata(pdev, scp);

	return 0;

init_fail:
	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}

	return ret;
}

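/*
 * A cluster counts as multi-core only when the node has at least two
 * "mediatek,scp-core" children; otherwise the original single-core
 * binding is used.
 */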
static bool scp_is_single_core(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct device_node *child;
	int num_cores = 0;

	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "mediatek,scp-core"))
			num_cores++;

	return num_cores < 2;
}

static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
{
	int ret;

	if (scp_is_single_core(pdev))
		ret = scp_add_single_core(pdev, scp_cluster);
	else
		ret = scp_add_multi_core(pdev, scp_cluster);

	return ret;
}

static int scp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_scp_of_cluster *scp_cluster;
	struct resource *res;
	int ret;

	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
	if (!scp_cluster)
		return -ENOMEM;

	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(scp_cluster->reg_base))
		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
				     "Failed to parse and map cfg memory\n");

	/* l1tcm is an optional memory region */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
	scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp_cluster->l1tcm_base)) {
		ret = PTR_ERR(scp_cluster->l1tcm_base);
		if (ret != -EINVAL)
			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");

		scp_cluster->l1tcm_base = NULL;
	} else {
		scp_cluster->l1tcm_size = resource_size(res);
		scp_cluster->l1tcm_phys = res->start;
	}

	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
	mutex_init(&scp_cluster->cluster_lock);

	ret = devm_of_platform_populate(dev);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");

	ret = scp_cluster_init(pdev, scp_cluster);
	if (ret)
		return ret;

	return 0;
}

static void scp_remove(struct platform_device *pdev)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *temp;

	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}
	mutex_destroy(&scp_cluster->cluster_lock);
}

static const struct mtk_scp_of_data mt8183_of_data = {
	.scp_clk_get = mt8183_scp_clk_get,
	.scp_before_load = mt8183_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x7bdb0,
};

static const struct mtk_scp_of_data mt8186_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8186_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x3bdb0,
};

static const struct mtk_scp_of_data mt8188_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8192_of_data = {
	.scp_clk_get = mt8192_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8195_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_before_load,
	.scp_irq_handler = mt8195_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8195_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8195_of_data_c1 = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_c1_before_load,
	.scp_irq_handler = mt8195_scp_c1_irq_handler,
	.scp_reset_assert = mt8195_scp_c1_reset_assert,
	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
	.scp_stop = mt8195_scp_c1_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
	&mt8195_of_data,
	&mt8195_of_data_c1,
	NULL
};

static const struct of_device_id mtk_scp_of_match[] = {
	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);

static struct platform_driver mtk_scp_driver = {
	.probe = scp_probe,
	.remove_new = scp_remove,
	.driver = {
		.name = "mtk-scp",
		.of_match_table = mtk_scp_of_match,
	},
};

module_platform_driver(mtk_scp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2//
   3// Copyright (c) 2019 MediaTek Inc.
   4
   5#include <asm/barrier.h>
   6#include <linux/clk.h>
   7#include <linux/dma-mapping.h>
   8#include <linux/err.h>
   9#include <linux/interrupt.h>
  10#include <linux/kernel.h>
  11#include <linux/module.h>
  12#include <linux/of_address.h>
  13#include <linux/of_platform.h>
  14#include <linux/of_reserved_mem.h>
  15#include <linux/platform_device.h>
  16#include <linux/remoteproc.h>
  17#include <linux/remoteproc/mtk_scp.h>
  18#include <linux/rpmsg/mtk_rpmsg.h>
  19
  20#include "mtk_common.h"
  21#include "remoteproc_internal.h"
  22
 
  23#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
  24
  25/**
  26 * scp_get() - get a reference to SCP.
  27 *
  28 * @pdev:	the platform device of the module requesting SCP platform
  29 *		device for using SCP API.
  30 *
  31 * Return: Return NULL if failed.  otherwise reference to SCP.
  32 **/
  33struct mtk_scp *scp_get(struct platform_device *pdev)
  34{
  35	struct device *dev = &pdev->dev;
  36	struct device_node *scp_node;
  37	struct platform_device *scp_pdev;
  38
  39	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
  40	if (!scp_node) {
  41		dev_err(dev, "can't get SCP node\n");
  42		return NULL;
  43	}
  44
  45	scp_pdev = of_find_device_by_node(scp_node);
  46	of_node_put(scp_node);
  47
  48	if (WARN_ON(!scp_pdev)) {
  49		dev_err(dev, "SCP pdev failed\n");
  50		return NULL;
  51	}
  52
  53	return platform_get_drvdata(scp_pdev);
  54}
  55EXPORT_SYMBOL_GPL(scp_get);
  56
  57/**
  58 * scp_put() - "free" the SCP
  59 *
  60 * @scp:	mtk_scp structure from scp_get().
  61 **/
  62void scp_put(struct mtk_scp *scp)
  63{
  64	put_device(scp->dev);
  65}
  66EXPORT_SYMBOL_GPL(scp_put);
  67
  68static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
  69{
  70	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
  71	struct mtk_scp *scp_node;
  72
  73	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
  74
  75	/* report watchdog timeout to all cores */
  76	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
  77		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
  78}
  79
  80static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
  81{
  82	struct mtk_scp *scp = priv;
  83	struct scp_run *run = data;
  84
  85	scp->run.signaled = run->signaled;
  86	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
  87	scp->run.dec_capability = run->dec_capability;
  88	scp->run.enc_capability = run->enc_capability;
  89	wake_up_interruptible(&scp->run.wq);
  90}
  91
  92static void scp_ipi_handler(struct mtk_scp *scp)
  93{
  94	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
  95	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
 
  96	scp_ipi_handler_t handler;
  97	u32 id = readl(&rcv_obj->id);
  98	u32 len = readl(&rcv_obj->len);
  99	const struct mtk_scp_sizes_data *scp_sizes;
 100
 101	scp_sizes = scp->data->scp_sizes;
 102	if (len > scp_sizes->ipi_share_buffer_size) {
 103		dev_err(scp->dev, "ipi message too long (len %d, max %zd)", len,
 104			scp_sizes->ipi_share_buffer_size);
 105		return;
 106	}
 107	if (id >= SCP_IPI_MAX) {
 108		dev_err(scp->dev, "No such ipi id = %d\n", id);
 109		return;
 110	}
 111
 112	scp_ipi_lock(scp, id);
 113	handler = ipi_desc[id].handler;
 114	if (!handler) {
 115		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
 116		scp_ipi_unlock(scp, id);
 117		return;
 118	}
 119
 120	memcpy_fromio(scp->share_buf, &rcv_obj->share_buf, len);
 121	memset(&scp->share_buf[len], 0, scp_sizes->ipi_share_buffer_size - len);
 122	handler(scp->share_buf, len, ipi_desc[id].priv);
 123	scp_ipi_unlock(scp, id);
 124
 125	scp->ipi_id_ack[id] = true;
 126	wake_up(&scp->ack_wq);
 127}
 128
 129static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 130				     const struct firmware *fw,
 131				     size_t *offset);
 132
 133static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 134{
 135	int ret;
 136	size_t buf_sz, offset;
 137	size_t share_buf_offset;
 138	const struct mtk_scp_sizes_data *scp_sizes;
 139
 140	/* read the ipi buf addr from FW itself first */
 141	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
 142	if (ret) {
 143		/* use default ipi buf addr if the FW doesn't have it */
 144		offset = scp->data->ipi_buf_offset;
 145		if (!offset)
 146			return ret;
 147	}
 148	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
 149
 150	/* Make sure IPI buffer fits in the L2TCM range assigned to this core */
 151	buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
 152
 153	if (scp->sram_size < buf_sz + offset) {
 154		dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
 155		return -EOVERFLOW;
 156	}
 157
 158	scp_sizes = scp->data->scp_sizes;
 159	scp->recv_buf = (struct mtk_share_obj __iomem *)
 160			(scp->sram_base + offset);
 161	share_buf_offset = sizeof(scp->recv_buf->id)
 162		+ sizeof(scp->recv_buf->len) + scp_sizes->ipi_share_buffer_size;
 163	scp->send_buf = (struct mtk_share_obj __iomem *)
 164			(scp->sram_base + offset + share_buf_offset);
 165	memset_io(scp->recv_buf, 0, share_buf_offset);
 166	memset_io(scp->send_buf, 0, share_buf_offset);
 167
 168	return 0;
 169}
 170
 171static void mt8183_scp_reset_assert(struct mtk_scp *scp)
 172{
 173	u32 val;
 174
 175	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 176	val &= ~MT8183_SW_RSTN_BIT;
 177	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 178}
 179
 180static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
 181{
 182	u32 val;
 183
 184	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
 185	val |= MT8183_SW_RSTN_BIT;
 186	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 187}
 188
 189static void mt8192_scp_reset_assert(struct mtk_scp *scp)
 190{
 191	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 192}
 193
 194static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
 195{
 196	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
 197}
 198
 199static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
 200{
 201	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
 202}
 203
 204static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
 205{
 206	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
 207}
 208
 209static void mt8183_scp_irq_handler(struct mtk_scp *scp)
 210{
 211	u32 scp_to_host;
 212
 213	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 214	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
 215		scp_ipi_handler(scp);
 216	else
 217		scp_wdt_handler(scp, scp_to_host);
 218
 219	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
 220	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
 221	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 222}
 223
 224static void mt8192_scp_irq_handler(struct mtk_scp *scp)
 225{
 226	u32 scp_to_host;
 227
 228	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 229
 230	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 231		scp_ipi_handler(scp);
 232
 233		/*
 234		 * SCP won't send another interrupt until we clear
 235		 * MT8192_SCP2APMCU_IPC.
 236		 */
 237		writel(MT8192_SCP_IPC_INT_BIT,
 238		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 239	} else {
 240		scp_wdt_handler(scp, scp_to_host);
 241		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 242	}
 243}
 244
 245static void mt8195_scp_irq_handler(struct mtk_scp *scp)
 246{
 247	u32 scp_to_host;
 248
 249	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 250
 251	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
 252		scp_ipi_handler(scp);
 253	} else {
 254		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);
 255
 256		if (reason & MT8195_CORE0_WDT)
 257			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
 258
 259		if (reason & MT8195_CORE1_WDT)
 260			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);
 261
 262		scp_wdt_handler(scp, reason);
 263	}
 264
 265	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
 266}
 267
 268static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
 269{
 270	u32 scp_to_host;
 271
 272	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);
 273
 274	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
 275		scp_ipi_handler(scp);
 276
 277	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
 278}
 279
 280static irqreturn_t scp_irq_handler(int irq, void *priv)
 281{
 282	struct mtk_scp *scp = priv;
 283	int ret;
 284
 285	ret = clk_prepare_enable(scp->clk);
 286	if (ret) {
 287		dev_err(scp->dev, "failed to enable clocks\n");
 288		return IRQ_NONE;
 289	}
 290
 291	scp->data->scp_irq_handler(scp);
 292
 293	clk_disable_unprepare(scp->clk);
 294
 295	return IRQ_HANDLED;
 296}
 297
 298static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
 299{
 300	struct device *dev = &rproc->dev;
 301	struct elf32_hdr *ehdr;
 302	struct elf32_phdr *phdr;
 303	int i, ret = 0;
 304	const u8 *elf_data = fw->data;
 305
 306	ehdr = (struct elf32_hdr *)elf_data;
 307	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
 308
 309	/* go through the available ELF segments */
 310	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
 311		u32 da = phdr->p_paddr;
 312		u32 memsz = phdr->p_memsz;
 313		u32 filesz = phdr->p_filesz;
 314		u32 offset = phdr->p_offset;
 315		void __iomem *ptr;
 316
 317		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
 318			phdr->p_type, da, memsz, filesz);
 319
 320		if (phdr->p_type != PT_LOAD)
 321			continue;
 322		if (!filesz)
 323			continue;
 324
 325		if (filesz > memsz) {
 326			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
 327				filesz, memsz);
 328			ret = -EINVAL;
 329			break;
 330		}
 331
 332		if (offset + filesz > fw->size) {
 333			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
 334				offset + filesz, fw->size);
 335			ret = -EINVAL;
 336			break;
 337		}
 338
 339		/* grab the kernel address for this device address */
 340		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
 341		if (!ptr) {
 342			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
 343			ret = -EINVAL;
 344			break;
 345		}
 346
 347		/* put the segment where the remote processor expects it */
 348		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
 349	}
 350
 351	return ret;
 352}
 353
 354static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
 355				     const struct firmware *fw,
 356				     size_t *offset)
 357{
 358	struct elf32_hdr *ehdr;
 359	struct elf32_shdr *shdr, *shdr_strtab;
 360	int i;
 361	const u8 *elf_data = fw->data;
 362	const char *strtab;
 363
 364	ehdr = (struct elf32_hdr *)elf_data;
 365	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
 366	shdr_strtab = shdr + ehdr->e_shstrndx;
 367	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
 368
 369	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
 370		if (strcmp(strtab + shdr->sh_name,
 371			   SECTION_NAME_IPI_BUFFER) == 0) {
 372			*offset = shdr->sh_addr;
 373			return 0;
 374		}
 375	}
 376
 377	return -ENOENT;
 378}
 379
 380static int mt8183_scp_clk_get(struct mtk_scp *scp)
 381{
 382	struct device *dev = scp->dev;
 383	int ret = 0;
 384
 385	scp->clk = devm_clk_get(dev, "main");
 386	if (IS_ERR(scp->clk)) {
 387		dev_err(dev, "Failed to get clock\n");
 388		ret = PTR_ERR(scp->clk);
 389	}
 390
 391	return ret;
 392}
 393
 394static int mt8192_scp_clk_get(struct mtk_scp *scp)
 395{
 396	return mt8183_scp_clk_get(scp);
 397}
 398
 399static int mt8195_scp_clk_get(struct mtk_scp *scp)
 400{
 401	scp->clk = NULL;
 402
 403	return 0;
 404}
 405
 406static int mt8183_scp_before_load(struct mtk_scp *scp)
 407{
 408	/* Clear SCP to host interrupt */
 409	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 410
 411	/* Reset clocks before loading FW */
 412	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 413	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 414
 415	/* Initialize TCM before loading FW. */
 416	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 417	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 418
 419	/* Turn on the power of SCP's SRAM before using it. */
 420	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);
 421
 422	/*
 423	 * Set I-cache and D-cache size before loading SCP FW.
 424	 * SCP SRAM logical address may change when cache size setting differs.
 425	 */
 426	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 427	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 428	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 429
 430	return 0;
 431}
 432
 433static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
 434{
 435	int i;
 436
 437	for (i = 31; i >= 0; i--)
 438		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 439	writel(0, addr);
 440}
 441
 442static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
 443{
 444	int i;
 445
 446	writel(0, addr);
 447	for (i = 0; i < 32; i++)
 448		writel(GENMASK(i, 0) & ~reserved_mask, addr);
 449}
 450
 451static int mt8186_scp_before_load(struct mtk_scp *scp)
 452{
 453	/* Clear SCP to host interrupt */
 454	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 455
 456	/* Reset clocks before loading FW */
 457	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
 458	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 459
 460	/* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
 461	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);
 462
 463	/* Initialize TCM before loading FW. */
 464	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
 465	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 466	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
 467	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
 468
 469	/*
 470	 * Set I-cache and D-cache size before loading SCP FW.
 471	 * SCP SRAM logical address may change when cache size setting differs.
 472	 */
 473	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
 474	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
 475	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 476
 477	return 0;
 478}
 479
 480static int mt8188_scp_l2tcm_on(struct mtk_scp *scp)
 481{
 482	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 483
 484	mutex_lock(&scp_cluster->cluster_lock);
 485
 486	if (scp_cluster->l2tcm_refcnt == 0) {
 487		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 488		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 489
 490		/* Power on L2TCM */
 491		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 492		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 493		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 494		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 495	}
 496
 497	scp_cluster->l2tcm_refcnt += 1;
 498
 499	mutex_unlock(&scp_cluster->cluster_lock);
 500
 501	return 0;
 502}
 503
 504static int mt8188_scp_before_load(struct mtk_scp *scp)
 505{
 506	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 507
 508	mt8188_scp_l2tcm_on(scp);
 509
 510	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 511
 512	/* enable MPU for all memory regions */
 513	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 514
 515	return 0;
 516}
 517
 518static int mt8188_scp_c1_before_load(struct mtk_scp *scp)
 519{
 520	u32 sec_ctrl;
 521	struct mtk_scp *scp_c0;
 522	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 523
 524	scp->data->scp_reset_assert(scp);
 525
 526	mt8188_scp_l2tcm_on(scp);
 527
 528	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 529
 530	/* enable MPU for all memory regions */
 531	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
 532
 533	/*
 534	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
 535	 * on SRAM when SCP core 1 accesses SRAM.
 536	 *
 537	 * This configuration allows SCP core 0 and core 1 to boot from
 538	 * different SRAM addresses, because both cores boot from the head
 539	 * of SRAM by default. This must be configured before booting SCP core 1.
 540	 *
 541	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
 542	 * When SCP core 1 issues address within the range (L2TCM_OFFSET_RANGE),
 543	 * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
 544	 * The shift action is transparent to software.
 545	 */
 546	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
 547	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
 548
 549	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
 550	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
 551
 552	/* enable SRAM offset when fetching instruction and data */
 553	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
 554	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
 555	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
 556
 557	return 0;
 558}
 559
 560static int mt8192_scp_before_load(struct mtk_scp *scp)
 561{
 562	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 563	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 564
 565	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 566
 567	/* enable SRAM clock */
 568	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 569	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 570	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 571	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 572	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 573
 574	/* enable MPU for all memory regions */
 575	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 576
 577	return 0;
 578}
 579
 580static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
 581{
 582	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 583
 584	mutex_lock(&scp_cluster->cluster_lock);
 585
 586	if (scp_cluster->l2tcm_refcnt == 0) {
 587		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
 588		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 589
 590		/* Power on L2TCM */
 591		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 592		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 593		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 594		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 595				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 596	}
 597
 598	scp_cluster->l2tcm_refcnt += 1;
 599
 600	mutex_unlock(&scp_cluster->cluster_lock);
 601
 602	return 0;
 603}
 604
 605static int mt8195_scp_before_load(struct mtk_scp *scp)
 606{
 607	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 608
 609	mt8195_scp_l2tcm_on(scp);
 610
 611	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 612
 613	/* enable MPU for all memory regions */
 614	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
 615
 616	return 0;
 617}
 618
 619static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
 620{
 621	u32 sec_ctrl;
 622	struct mtk_scp *scp_c0;
 623	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 624
 625	scp->data->scp_reset_assert(scp);
 626
 627	mt8195_scp_l2tcm_on(scp);
 628
 629	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 630
 631	/* enable MPU for all memory regions */
 632	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
 633
 634	/*
 635	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
 636	 * on SRAM when SCP core 1 accesses SRAM.
 637	 *
 638	 * This configuration allows SCP core 0 and core 1 to boot from
 639	 * different SRAM addresses, because both cores boot from the head
 640	 * of SRAM by default. This must be configured before booting SCP core 1.
 641	 *
 642	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
 643	 * When SCP core 1 issues address within the range (L2TCM_OFFSET_RANGE),
 644	 * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
 645	 * The shift action is transparent to software.
 646	 */
 647	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
 648	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
 649
 650	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
 651	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
 652
 653	/* enable SRAM offset when fetching instruction and data */
 654	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
 655	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
 656	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
 657
 658	return 0;
 659}
 660
 661static int scp_load(struct rproc *rproc, const struct firmware *fw)
 662{
 663	struct mtk_scp *scp = rproc->priv;
 664	struct device *dev = scp->dev;
 665	int ret;
 666
 667	ret = clk_prepare_enable(scp->clk);
 668	if (ret) {
 669		dev_err(dev, "failed to enable clocks\n");
 670		return ret;
 671	}
 672
 673	/* Hold SCP in reset while loading FW. */
 674	scp->data->scp_reset_assert(scp);
 675
 676	ret = scp->data->scp_before_load(scp);
 677	if (ret < 0)
 678		goto leave;
 679
 680	ret = scp_elf_load_segments(rproc, fw);
 681leave:
 682	clk_disable_unprepare(scp->clk);
 683
 684	return ret;
 685}
 686
 687static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
 688{
 689	struct mtk_scp *scp = rproc->priv;
 690	struct device *dev = scp->dev;
 691	int ret;
 692
 693	ret = clk_prepare_enable(scp->clk);
 694	if (ret) {
 695		dev_err(dev, "failed to enable clocks\n");
 696		return ret;
 697	}
 698
 699	ret = scp_ipi_init(scp, fw);
 700	clk_disable_unprepare(scp->clk);
 701	return ret;
 702}
 703
 704static int scp_start(struct rproc *rproc)
 705{
 706	struct mtk_scp *scp = rproc->priv;
 707	struct device *dev = scp->dev;
 708	struct scp_run *run = &scp->run;
 709	int ret;
 710
 711	ret = clk_prepare_enable(scp->clk);
 712	if (ret) {
 713		dev_err(dev, "failed to enable clocks\n");
 714		return ret;
 715	}
 716
 717	run->signaled = false;
 718
 719	scp->data->scp_reset_deassert(scp);
 720
 721	ret = wait_event_interruptible_timeout(
 722					run->wq,
 723					run->signaled,
 724					msecs_to_jiffies(2000));
 725
 726	if (ret == 0) {
 727		dev_err(dev, "timed out waiting for SCP initialization!\n");
 728		ret = -ETIME;
 729		goto stop;
 730	}
 731	if (ret == -ERESTARTSYS) {
 732		dev_err(dev, "interrupted by a signal while waiting for SCP!\n");
 733		goto stop;
 734	}
 735
 736	clk_disable_unprepare(scp->clk);
 737	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
 738
 739	return 0;
 740
 741stop:
 742	scp->data->scp_reset_assert(scp);
 743	clk_disable_unprepare(scp->clk);
 744	return ret;
 745}
 746
 747static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 748{
 749	int offset;
 750	const struct mtk_scp_sizes_data *scp_sizes;
 751
 752	scp_sizes = scp->data->scp_sizes;
 753	if (da < scp->sram_size) {
 754		offset = da;
 755		if (offset >= 0 && (offset + len) <= scp->sram_size)
 756			return (void __force *)scp->sram_base + offset;
 757	} else if (scp_sizes->max_dram_size) {
 758		offset = da - scp->dma_addr;
 759		if (offset >= 0 && (offset + len) <= scp_sizes->max_dram_size)
 760			return scp->cpu_addr + offset;
 761	}
 762
 763	return NULL;
 764}
 765
 766static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 767{
 768	int offset;
 769	const struct mtk_scp_sizes_data *scp_sizes;
 770
 771	scp_sizes = scp->data->scp_sizes;
 772	if (da >= scp->sram_phys &&
 773	    (da + len) <= scp->sram_phys + scp->sram_size) {
 774		offset = da - scp->sram_phys;
 775		return (void __force *)scp->sram_base + offset;
 776	}
 777
 778	/* optional memory region */
 779	if (scp->cluster->l1tcm_size &&
 780	    da >= scp->cluster->l1tcm_phys &&
 781	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
 782		offset = da - scp->cluster->l1tcm_phys;
 783		return (void __force *)scp->cluster->l1tcm_base + offset;
 784	}
 785
 786	/* optional memory region */
 787	if (scp_sizes->max_dram_size &&
 788	    da >= scp->dma_addr &&
 789	    (da + len) <= scp->dma_addr + scp_sizes->max_dram_size) {
 790		offset = da - scp->dma_addr;
 791		return scp->cpu_addr + offset;
 792	}
 793
 794	return NULL;
 795}
 796
 797static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
 798{
 799	struct mtk_scp *scp = rproc->priv;
 800
 801	return scp->data->scp_da_to_va(scp, da, len);
 802}
 803
 804static void mt8183_scp_stop(struct mtk_scp *scp)
 805{
 806	/* Disable SCP watchdog */
 807	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
 808}
 809
 810static void mt8188_scp_l2tcm_off(struct mtk_scp *scp)
 811{
 812	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 813
 814	mutex_lock(&scp_cluster->cluster_lock);
 815
 816	if (scp_cluster->l2tcm_refcnt > 0)
 817		scp_cluster->l2tcm_refcnt -= 1;
 818
 819	if (scp_cluster->l2tcm_refcnt == 0) {
 820		/* Power off L2TCM */
 821		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 822		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 823		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 824		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 825	}
 826
 827	mutex_unlock(&scp_cluster->cluster_lock);
 828}
 829
 830static void mt8188_scp_stop(struct mtk_scp *scp)
 831{
 832	mt8188_scp_l2tcm_off(scp);
 833
 834	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 835
 836	/* Disable SCP watchdog */
 837	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 838}
 839
 840static void mt8188_scp_c1_stop(struct mtk_scp *scp)
 841{
 842	mt8188_scp_l2tcm_off(scp);
 843
 844	/* Power off CPU SRAM */
 845	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 846
 847	/* Disable SCP watchdog */
 848	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
 849}
 850
 851static void mt8192_scp_stop(struct mtk_scp *scp)
 852{
 853	/* Disable SRAM clock */
 854	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 855	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 856	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 857	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
 858	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 859
 860	/* Disable SCP watchdog */
 861	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 862}
 863
 864static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
 865{
 866	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
 867
 868	mutex_lock(&scp_cluster->cluster_lock);
 869
 870	if (scp_cluster->l2tcm_refcnt > 0)
 871		scp_cluster->l2tcm_refcnt -= 1;
 872
 873	if (scp_cluster->l2tcm_refcnt == 0) {
 874		/* Power off L2TCM */
 875		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
 876		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
 877		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
 878		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
 879				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
 880	}
 881
 882	mutex_unlock(&scp_cluster->cluster_lock);
 883}
 884
 885static void mt8195_scp_stop(struct mtk_scp *scp)
 886{
 887	mt8195_scp_l2tcm_off(scp);
 888
 889	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 890
 891	/* Disable SCP watchdog */
 892	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
 893}
 894
 895static void mt8195_scp_c1_stop(struct mtk_scp *scp)
 896{
 897	mt8195_scp_l2tcm_off(scp);
 898
 899	/* Power off CPU SRAM */
 900	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 901
 902	/* Disable SCP watchdog */
 903	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
 904}
 905
 906static int scp_stop(struct rproc *rproc)
 907{
 908	struct mtk_scp *scp = rproc->priv;
 909	int ret;
 910
 911	ret = clk_prepare_enable(scp->clk);
 912	if (ret) {
 913		dev_err(scp->dev, "failed to enable clocks\n");
 914		return ret;
 915	}
 916
 917	scp->data->scp_reset_assert(scp);
 918	scp->data->scp_stop(scp);
 919	clk_disable_unprepare(scp->clk);
 920
 921	return 0;
 922}
 923
 924static const struct rproc_ops scp_ops = {
 925	.start		= scp_start,
 926	.stop		= scp_stop,
 927	.load		= scp_load,
 928	.da_to_va	= scp_da_to_va,
 929	.parse_fw	= scp_parse_fw,
 930	.sanity_check	= rproc_elf_sanity_check,
 931};
 932
 933/**
 934 * scp_get_device() - get device struct of SCP
 935 *
 936 * @scp:	mtk_scp structure
 *
 * Return: the device struct of SCP
 937  **/
 938struct device *scp_get_device(struct mtk_scp *scp)
 939{
 940	return scp->dev;
 941}
 942EXPORT_SYMBOL_GPL(scp_get_device);
 943
 944/**
 945 * scp_get_rproc() - get rproc struct of SCP
 946 *
 947 * @scp:	mtk_scp structure
 *
 * Return: the rproc struct of SCP
 948  **/
 949struct rproc *scp_get_rproc(struct mtk_scp *scp)
 950{
 951	return scp->rproc;
 952}
 953EXPORT_SYMBOL_GPL(scp_get_rproc);
 954
 955/**
 956 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 957 *
 958 * @scp:	mtk_scp structure
 959 *
 960 * Return: video decoder hardware capability
 961 **/
 962unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
 963{
 964	return scp->run.dec_capability;
 965}
 966EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
 967
 968/**
 969 * scp_get_venc_hw_capa() - get video encoder hardware capability
 970 *
 971 * @scp:	mtk_scp structure
 972 *
 973 * Return: video encoder hardware capability
 974 **/
 975unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
 976{
 977	return scp->run.enc_capability;
 978}
 979EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
 980
 981/**
 982 * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
 983 *
 984 * @scp:	mtk_scp structure
 985  * @mem_addr:	memory address from the SCP's point of view
 986 *
 987  * Map the SCP's SRAM address, DMEM (Data Extended Memory) address,
 988  * or working buffer address to a kernel virtual address.
 991 *
 992  * Return: ERR_PTR(-EINVAL) if mapping failed,
 993  * otherwise the mapped kernel virtual address
 994 **/
 995void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
 996{
 997	void *ptr;
 998
 999	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
1000	if (!ptr)
1001		return ERR_PTR(-EINVAL);
1002
1003	return ptr;
1004}
1005EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
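/*
 * Usage sketch for the client-facing API above (illustrative only;
 * "client_pdev", "dmem_addr" and "vaddr" are placeholders for a client
 * driver's own state):
 *
 *	struct mtk_scp *scp = scp_get(client_pdev);
 *
 *	if (!scp)
 *		return -ENODEV;
 *	if (scp_get_vdec_hw_capa(scp))
 *		vaddr = scp_mapping_dm_addr(scp, dmem_addr);
 *	...
 *	scp_put(scp);
 */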
1006
1007static int scp_map_memory_region(struct mtk_scp *scp)
1008{
1009	int ret;
1010	const struct mtk_scp_sizes_data *scp_sizes;
1011
1012	ret = of_reserved_mem_device_init(scp->dev);
1013
1014	/* reserved memory is optional. */
1015	if (ret == -ENODEV) {
 1016		dev_info(scp->dev, "skipping reserved memory initialization.\n");
1017		return 0;
1018	}
1019
1020	if (ret) {
1021		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
1022		return -ENOMEM;
1023	}
1024
1025	/* Reserved SCP code size */
1026	scp_sizes = scp->data->scp_sizes;
1027	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp_sizes->max_dram_size,
1028					   &scp->dma_addr, GFP_KERNEL);
1029	if (!scp->cpu_addr)
1030		return -ENOMEM;
1031
1032	return 0;
1033}
1034
1035static void scp_unmap_memory_region(struct mtk_scp *scp)
1036{
1037	const struct mtk_scp_sizes_data *scp_sizes;
1038
1039	scp_sizes = scp->data->scp_sizes;
1040	if (scp_sizes->max_dram_size == 0)
1041		return;
1042
1043	dma_free_coherent(scp->dev, scp_sizes->max_dram_size, scp->cpu_addr,
1044			  scp->dma_addr);
1045	of_reserved_mem_device_release(scp->dev);
1046}
1047
1048static int scp_register_ipi(struct platform_device *pdev, u32 id,
1049			    ipi_handler_t handler, void *priv)
1050{
1051	struct mtk_scp *scp = platform_get_drvdata(pdev);
1052
1053	return scp_ipi_register(scp, id, handler, priv);
1054}
1055
1056static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
1057{
1058	struct mtk_scp *scp = platform_get_drvdata(pdev);
1059
1060	scp_ipi_unregister(scp, id);
1061}
1062
1063static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
1064			unsigned int len, unsigned int wait)
1065{
1066	struct mtk_scp *scp = platform_get_drvdata(pdev);
1067
1068	return scp_ipi_send(scp, id, buf, len, wait);
1069}
1070
1071static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
1072	.send_ipi = scp_send_ipi,
1073	.register_ipi = scp_register_ipi,
1074	.unregister_ipi = scp_unregister_ipi,
1075	.ns_ipi_id = SCP_IPI_NS_SERVICE,
1076};
1077
1078static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
1079{
1080	scp->rpmsg_subdev =
1081		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
1082					      &mtk_scp_rpmsg_info);
1083	if (scp->rpmsg_subdev)
1084		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
1085}
1086
1087static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
1088{
1089	if (scp->rpmsg_subdev) {
1090		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
1091		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
1092		scp->rpmsg_subdev = NULL;
1093	}
1094}
1095
1096static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
1097				      struct mtk_scp_of_cluster *scp_cluster,
1098				      const struct mtk_scp_of_data *of_data)
1099{
1100	struct device *dev = &pdev->dev;
1101	struct device_node *np = dev->of_node;
1102	struct mtk_scp *scp;
1103	struct rproc *rproc;
1104	struct resource *res;
1105	const char *fw_name = "scp.img";
1106	int ret, i;
1107	const struct mtk_scp_sizes_data *scp_sizes;
1108
1109	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
1110	if (ret < 0 && ret != -EINVAL)
1111		return ERR_PTR(ret);
1112
1113	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
1114	if (!rproc) {
1115		dev_err(dev, "unable to allocate remoteproc\n");
1116		return ERR_PTR(-ENOMEM);
1117	}
1118
1119	scp = rproc->priv;
1120	scp->rproc = rproc;
1121	scp->dev = dev;
1122	scp->data = of_data;
1123	scp->cluster = scp_cluster;
1124	platform_set_drvdata(pdev, scp);
1125
1126	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
1127	scp->sram_base = devm_ioremap_resource(dev, res);
1128	if (IS_ERR(scp->sram_base)) {
1129		dev_err(dev, "Failed to parse and map sram memory\n");
1130		return ERR_CAST(scp->sram_base);
1131	}
1132
1133	scp->sram_size = resource_size(res);
1134	scp->sram_phys = res->start;
1135
1136	ret = scp->data->scp_clk_get(scp);
1137	if (ret)
1138		return ERR_PTR(ret);
1139
1140	ret = scp_map_memory_region(scp);
1141	if (ret)
1142		return ERR_PTR(ret);
1143
1144	mutex_init(&scp->send_lock);
1145	for (i = 0; i < SCP_IPI_MAX; i++)
1146		mutex_init(&scp->ipi_desc[i].lock);
1147
1148	/* register SCP initialization IPI */
1149	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
1150	if (ret) {
1151		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
1152		goto release_dev_mem;
1153	}
1154
1155	scp_sizes = scp->data->scp_sizes;
1156	scp->share_buf = kzalloc(scp_sizes->ipi_share_buffer_size, GFP_KERNEL);
1157	if (!scp->share_buf) {
1158		dev_err(dev, "Failed to allocate IPI share buffer\n");
1159		ret = -ENOMEM;
1160		goto release_dev_mem;
1161	}
1162
1163	init_waitqueue_head(&scp->run.wq);
1164	init_waitqueue_head(&scp->ack_wq);
1165
1166	scp_add_rpmsg_subdev(scp);
1167
1168	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
1169					scp_irq_handler, IRQF_ONESHOT,
1170					pdev->name, scp);
1171
1172	if (ret) {
1173		dev_err(dev, "failed to request irq\n");
1174		goto remove_subdev;
1175	}
1176
1177	return scp;
1178
1179remove_subdev:
1180	scp_remove_rpmsg_subdev(scp);
1181	scp_ipi_unregister(scp, SCP_IPI_INIT);
1182	kfree(scp->share_buf);
1183	scp->share_buf = NULL;
1184release_dev_mem:
1185	scp_unmap_memory_region(scp);
1186	for (i = 0; i < SCP_IPI_MAX; i++)
1187		mutex_destroy(&scp->ipi_desc[i].lock);
1188	mutex_destroy(&scp->send_lock);
1189
1190	return ERR_PTR(ret);
1191}
1192
1193static void scp_free(struct mtk_scp *scp)
1194{
1195	int i;
1196
1197	scp_remove_rpmsg_subdev(scp);
1198	scp_ipi_unregister(scp, SCP_IPI_INIT);
1199	kfree(scp->share_buf);
1200	scp->share_buf = NULL;
1201	scp_unmap_memory_region(scp);
1202	for (i = 0; i < SCP_IPI_MAX; i++)
1203		mutex_destroy(&scp->ipi_desc[i].lock);
1204	mutex_destroy(&scp->send_lock);
1205}
1206
1207static int scp_add_single_core(struct platform_device *pdev,
1208			       struct mtk_scp_of_cluster *scp_cluster)
1209{
1210	struct device *dev = &pdev->dev;
1211	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1212	struct mtk_scp *scp;
1213	int ret;
1214
1215	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
1216	if (IS_ERR(scp))
1217		return PTR_ERR(scp);
1218
1219	ret = rproc_add(scp->rproc);
1220	if (ret) {
1221		dev_err(dev, "Failed to add rproc\n");
1222		scp_free(scp);
1223		return ret;
1224	}
1225
1226	list_add_tail(&scp->elem, scp_list);
1227
1228	return 0;
1229}
1230
1231static int scp_add_multi_core(struct platform_device *pdev,
1232			      struct mtk_scp_of_cluster *scp_cluster)
1233{
1234	struct device *dev = &pdev->dev;
1235	struct device_node *np = dev_of_node(dev);
1236	struct platform_device *cpdev;
1237	struct device_node *child;
1238	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
1239	const struct mtk_scp_of_data **cluster_of_data;
1240	struct mtk_scp *scp, *temp;
1241	int core_id = 0;
1242	int ret;
1243
1244	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);
1245
1246	for_each_available_child_of_node(np, child) {
1247		if (!cluster_of_data[core_id]) {
1248			ret = -EINVAL;
 1249			dev_err(dev, "Unsupported core %d\n", core_id);
1250			of_node_put(child);
1251			goto init_fail;
1252		}
1253
1254		cpdev = of_find_device_by_node(child);
1255		if (!cpdev) {
1256			ret = -ENODEV;
 1257			dev_err(dev, "No platform device found for core %d\n", core_id);
1258			of_node_put(child);
1259			goto init_fail;
1260		}
1261
1262		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
1263		put_device(&cpdev->dev);
1264		if (IS_ERR(scp)) {
1265			ret = PTR_ERR(scp);
1266			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
1267			of_node_put(child);
1268			goto init_fail;
1269		}
1270
1271		ret = rproc_add(scp->rproc);
1272		if (ret) {
1273			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
1274			of_node_put(child);
1275			scp_free(scp);
1276			goto init_fail;
1277		}
1278
1279		list_add_tail(&scp->elem, scp_list);
1280		core_id++;
1281	}
1282
 1283	/*
 1284	 * Set the driver data of @pdev to the last @scp created. This is
 1285	 * needed because (1) scp_rproc_init() calls platform_set_drvdata()
 1286	 * only on the child platform devices and (2) scp_remove() needs a
 1287	 * handle to the cluster list via the drvdata of @pdev.
 1288	 */
1289	platform_set_drvdata(pdev, scp);
1290
1291	return 0;
1292
1293init_fail:
1294	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
1295		list_del(&scp->elem);
1296		rproc_del(scp->rproc);
1297		scp_free(scp);
1298	}
1299
1300	return ret;
1301}
1302
1303static bool scp_is_single_core(struct platform_device *pdev)
1304{
1305	struct device *dev = &pdev->dev;
1306	struct device_node *np = dev_of_node(dev);
1307	struct device_node *child;
1308	int num_cores = 0;
1309
1310	for_each_child_of_node(np, child)
1311		if (of_device_is_compatible(child, "mediatek,scp-core"))
1312			num_cores++;
1313
1314	return num_cores < 2;
1315}
1316
1317static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
1318{
1319	int ret;
1320
1321	if (scp_is_single_core(pdev))
1322		ret = scp_add_single_core(pdev, scp_cluster);
1323	else
1324		ret = scp_add_multi_core(pdev, scp_cluster);
1325
1326	return ret;
1327}
1328
1329static const struct of_device_id scp_core_match[] = {
1330	{ .compatible = "mediatek,scp-core" },
1331	{}
1332};
1333
1334static int scp_probe(struct platform_device *pdev)
1335{
1336	struct device *dev = &pdev->dev;
1337	struct mtk_scp_of_cluster *scp_cluster;
1338	struct resource *res;
1339	int ret;
1340
1341	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
1342	if (!scp_cluster)
1343		return -ENOMEM;
1344
1345	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
1346	if (IS_ERR(scp_cluster->reg_base))
1347		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
1348				     "Failed to parse and map cfg memory\n");
1349
1350	/* l1tcm is an optional memory region */
1351	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
1352	if (res) {
1353		scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
1354		if (IS_ERR(scp_cluster->l1tcm_base))
1355			return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base),
1356					     "Failed to map l1tcm memory\n");
1357
 
 
1358		scp_cluster->l1tcm_size = resource_size(res);
1359		scp_cluster->l1tcm_phys = res->start;
1360	}
1361
1362	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
1363	mutex_init(&scp_cluster->cluster_lock);
1364
1365	ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
1366	if (ret)
1367		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
1368
1369	ret = scp_cluster_init(pdev, scp_cluster);
1370	if (ret) {
1371		of_platform_depopulate(dev);
1372		return ret;
1373	}
1374
1375	return 0;
1376}
1377
1378static void scp_remove(struct platform_device *pdev)
1379{
1380	struct mtk_scp *scp = platform_get_drvdata(pdev);
1381	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
1382	struct mtk_scp *temp;
1383
1384	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
1385		list_del(&scp->elem);
1386		rproc_del(scp->rproc);
1387		scp_free(scp);
1388	}
1389	of_platform_depopulate(&pdev->dev);
1390	mutex_destroy(&scp_cluster->cluster_lock);
1391}
1392
1393static const struct mtk_scp_sizes_data default_scp_sizes = {
1394	.max_dram_size = 0x500000,
1395	.ipi_share_buffer_size = 288,
1396};
1397
1398static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
1399	.max_dram_size = 0x800000,
1400	.ipi_share_buffer_size = 600,
1401};
1402
1403static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = {
1404	.max_dram_size = 0xA00000,
1405	.ipi_share_buffer_size = 600,
1406};
1407
1408static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
1409	.max_dram_size = 0x800000,
1410	.ipi_share_buffer_size = 288,
1411};
1412
1413static const struct mtk_scp_of_data mt8183_of_data = {
1414	.scp_clk_get = mt8183_scp_clk_get,
1415	.scp_before_load = mt8183_scp_before_load,
1416	.scp_irq_handler = mt8183_scp_irq_handler,
1417	.scp_reset_assert = mt8183_scp_reset_assert,
1418	.scp_reset_deassert = mt8183_scp_reset_deassert,
1419	.scp_stop = mt8183_scp_stop,
1420	.scp_da_to_va = mt8183_scp_da_to_va,
1421	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1422	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1423	.ipi_buf_offset = 0x7bdb0,
1424	.scp_sizes = &default_scp_sizes,
1425};
1426
1427static const struct mtk_scp_of_data mt8186_of_data = {
1428	.scp_clk_get = mt8195_scp_clk_get,
1429	.scp_before_load = mt8186_scp_before_load,
1430	.scp_irq_handler = mt8183_scp_irq_handler,
1431	.scp_reset_assert = mt8183_scp_reset_assert,
1432	.scp_reset_deassert = mt8183_scp_reset_deassert,
1433	.scp_stop = mt8183_scp_stop,
1434	.scp_da_to_va = mt8183_scp_da_to_va,
1435	.host_to_scp_reg = MT8183_HOST_TO_SCP,
1436	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
1437	.ipi_buf_offset = 0x3bdb0,
1438	.scp_sizes = &default_scp_sizes,
1439};
1440
1441static const struct mtk_scp_of_data mt8188_of_data = {
1442	.scp_clk_get = mt8195_scp_clk_get,
1443	.scp_before_load = mt8188_scp_before_load,
1444	.scp_irq_handler = mt8195_scp_irq_handler,
1445	.scp_reset_assert = mt8192_scp_reset_assert,
1446	.scp_reset_deassert = mt8192_scp_reset_deassert,
1447	.scp_stop = mt8188_scp_stop,
1448	.scp_da_to_va = mt8192_scp_da_to_va,
1449	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1450	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1451	.scp_sizes = &mt8188_scp_sizes,
1452};
1453
1454static const struct mtk_scp_of_data mt8188_of_data_c1 = {
1455	.scp_clk_get = mt8195_scp_clk_get,
1456	.scp_before_load = mt8188_scp_c1_before_load,
1457	.scp_irq_handler = mt8195_scp_c1_irq_handler,
1458	.scp_reset_assert = mt8195_scp_c1_reset_assert,
1459	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
1460	.scp_stop = mt8188_scp_c1_stop,
1461	.scp_da_to_va = mt8192_scp_da_to_va,
1462	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1463	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
1464	.scp_sizes = &mt8188_scp_c1_sizes,
1465};
1466
1467static const struct mtk_scp_of_data mt8192_of_data = {
1468	.scp_clk_get = mt8192_scp_clk_get,
1469	.scp_before_load = mt8192_scp_before_load,
1470	.scp_irq_handler = mt8192_scp_irq_handler,
1471	.scp_reset_assert = mt8192_scp_reset_assert,
1472	.scp_reset_deassert = mt8192_scp_reset_deassert,
1473	.scp_stop = mt8192_scp_stop,
1474	.scp_da_to_va = mt8192_scp_da_to_va,
1475	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1476	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1477	.scp_sizes = &default_scp_sizes,
1478};
1479
1480static const struct mtk_scp_of_data mt8195_of_data = {
1481	.scp_clk_get = mt8195_scp_clk_get,
1482	.scp_before_load = mt8195_scp_before_load,
1483	.scp_irq_handler = mt8195_scp_irq_handler,
1484	.scp_reset_assert = mt8192_scp_reset_assert,
1485	.scp_reset_deassert = mt8192_scp_reset_deassert,
1486	.scp_stop = mt8195_scp_stop,
1487	.scp_da_to_va = mt8192_scp_da_to_va,
1488	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1489	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
1490	.scp_sizes = &mt8195_scp_sizes,
1491};
1492
1493static const struct mtk_scp_of_data mt8195_of_data_c1 = {
1494	.scp_clk_get = mt8195_scp_clk_get,
1495	.scp_before_load = mt8195_scp_c1_before_load,
1496	.scp_irq_handler = mt8195_scp_c1_irq_handler,
1497	.scp_reset_assert = mt8195_scp_c1_reset_assert,
1498	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
1499	.scp_stop = mt8195_scp_c1_stop,
1500	.scp_da_to_va = mt8192_scp_da_to_va,
1501	.host_to_scp_reg = MT8192_GIPC_IN_SET,
1502	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
1503	.scp_sizes = &default_scp_sizes,
1504};
1505
1506static const struct mtk_scp_of_data *mt8188_of_data_cores[] = {
1507	&mt8188_of_data,
1508	&mt8188_of_data_c1,
1509	NULL
1510};
1511
1512static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
1513	&mt8195_of_data,
1514	&mt8195_of_data_c1,
1515	NULL
1516};
1517
1518static const struct of_device_id mtk_scp_of_match[] = {
1519	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
1520	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
1521	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
1522	{ .compatible = "mediatek,mt8188-scp-dual", .data = &mt8188_of_data_cores },
1523	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
1524	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
1525	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
1526	{},
1527};
1528MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
1529
1530static struct platform_driver mtk_scp_driver = {
1531	.probe = scp_probe,
1532	.remove = scp_remove,
1533	.driver = {
1534		.name = "mtk-scp",
1535		.of_match_table = mtk_scp_of_match,
1536	},
1537};
1538
1539module_platform_driver(mtk_scp_driver);
1540
1541MODULE_LICENSE("GPL v2");
1542MODULE_DESCRIPTION("MediaTek SCP control driver");