// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.

#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>

#include "mtk_common.h"
#include "remoteproc_internal.h"

#define MAX_CODE_SIZE 0x500000
#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"

/**
 * scp_get() - get a reference to SCP.
 *
 * @pdev:	the platform device of the module requesting the SCP platform
 *		device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to the SCP.
 **/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *scp_node;
	struct platform_device *scp_pdev;

	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
	if (!scp_node) {
		dev_err(dev, "can't get SCP node\n");
		return NULL;
	}

	scp_pdev = of_find_device_by_node(scp_node);
	of_node_put(scp_node);

	if (WARN_ON(!scp_pdev)) {
		dev_err(dev, "SCP pdev failed\n");
		return NULL;
	}

	return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);

/**
 * scp_put() - "free" the SCP
 *
 * @scp:	mtk_scp structure from scp_get().
 **/
void scp_put(struct mtk_scp *scp)
{
	put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
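
/*
 * Example usage (editor's sketch, not part of the original file): a client
 * driver whose node carries a "mediatek,scp" phandle would typically pair
 * scp_get() with scp_put() like this; "client_pdev" is a hypothetical name.
 *
 *	struct mtk_scp *scp = scp_get(client_pdev);
 *
 *	if (!scp)
 *		return -EPROBE_DEFER;
 *	...
 *	scp_put(scp);	// drops the device reference taken by scp_get()
 */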

static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *scp_node;

	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);

	/* report watchdog timeout to all cores */
	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
}

static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_scp *scp = priv;
	struct scp_run *run = data;

	scp->run.signaled = run->signaled;
	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
	scp->run.dec_capability = run->dec_capability;
	scp->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&scp->run.wq);
}

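/*
 * Dispatch one incoming IPI from the SCP: the SCP places a struct
 * mtk_share_obj (id, len, payload) in the shared SRAM receive buffer;
 * after validating id and len, the payload is copied out of I/O memory
 * and the registered handler is invoked under the per-id IPI lock.
 */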
static void scp_ipi_handler(struct mtk_scp *scp)
{
	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
	scp_ipi_handler_t handler;
	u32 id = readl(&rcv_obj->id);
	u32 len = readl(&rcv_obj->len);

	if (len > SCP_SHARE_BUFFER_SIZE) {
		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
			SCP_SHARE_BUFFER_SIZE);
		return;
	}
	if (id >= SCP_IPI_MAX) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		return;
	}

	scp_ipi_lock(scp, id);
	handler = ipi_desc[id].handler;
	if (!handler) {
		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
		scp_ipi_unlock(scp, id);
		return;
	}

	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
	handler(tmp_data, len, ipi_desc[id].priv);
	scp_ipi_unlock(scp, id);

	scp->ipi_id_ack[id] = true;
	wake_up(&scp->ack_wq);
}

static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset);

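/*
 * The IPI share buffers live at a firmware-defined offset in SRAM: the
 * ".ipi_buffer" ELF section names the offset, and the receive and send
 * buffers are laid out back to back at that address.  Firmware without
 * the section falls back to the per-SoC default offset.
 */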
static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
	int ret;
	size_t offset;

	/* read the ipi buf addr from FW itself first */
	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
	if (ret) {
		/* use default ipi buf addr if the FW doesn't have it */
		offset = scp->data->ipi_buf_offset;
		if (!offset)
			return ret;
	}
	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);

	scp->recv_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset);
	scp->send_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset + sizeof(*scp->recv_buf));
	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));

	return 0;
}

static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val &= ~MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val |= MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
}

static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}

static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
}

static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
}

static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);
	else
		scp_wdt_handler(scp, scp_to_host);

	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
}

static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);

		/*
		 * SCP won't send another interrupt until we clear
		 * MT8192_SCP2APMCU_IPC.
		 */
		writel(MT8192_SCP_IPC_INT_BIT,
		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
	} else {
		scp_wdt_handler(scp, scp_to_host);
		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
	}
}

static void mt8195_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);
	} else {
		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);

		if (reason & MT8195_CORE0_WDT)
			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);

		if (reason & MT8195_CORE1_WDT)
			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);

		scp_wdt_handler(scp, reason);
	}

	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
}

static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);

	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
}

static irqreturn_t scp_irq_handler(int irq, void *priv)
{
	struct mtk_scp *scp = priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return IRQ_NONE;
	}

	scp->data->scp_irq_handler(scp);

	clk_disable_unprepare(scp->clk);

	return IRQ_HANDLED;
}

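/*
 * Load the firmware's PT_LOAD segments into SCP memory.  This mirrors
 * the generic rproc_elf_load_segments(), but copies each segment with
 * scp_memcpy_aligned(), the aligned-copy helper used for SCP SRAM.
 */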
static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void __iomem *ptr;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
			phdr->p_type, da, memsz, filesz);

		if (phdr->p_type != PT_LOAD)
			continue;
		if (!filesz)
			continue;

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > fw->size) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
				offset + filesz, fw->size);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
	}

	return ret;
}

static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr, *shdr_strtab;
	int i;
	const u8 *elf_data = fw->data;
	const char *strtab;

	ehdr = (struct elf32_hdr *)elf_data;
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	shdr_strtab = shdr + ehdr->e_shstrndx;
	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (strcmp(strtab + shdr->sh_name,
			   SECTION_NAME_IPI_BUFFER) == 0) {
			*offset = shdr->sh_addr;
			return 0;
		}
	}

	return -ENOENT;
}

static int mt8183_scp_clk_get(struct mtk_scp *scp)
{
	struct device *dev = scp->dev;
	int ret = 0;

	scp->clk = devm_clk_get(dev, "main");
	if (IS_ERR(scp->clk)) {
		dev_err(dev, "Failed to get clock\n");
		ret = PTR_ERR(scp->clk);
	}

	return ret;
}

static int mt8192_scp_clk_get(struct mtk_scp *scp)
{
	return mt8183_scp_clk_get(scp);
}

static int mt8195_scp_clk_get(struct mtk_scp *scp)
{
	scp->clk = NULL;

	return 0;
}

static int mt8183_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);

	/* Turn on the power of SCP's SRAM before using it. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

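/*
 * Each bit of the power-down register gates one SRAM bank.  Powering on
 * walks GENMASK(i, 0) from i = 31 down to 0 so that banks come up one
 * per register write; powering off sets the bits back one at a time.
 * Bits in @reserved_mask are never touched.
 */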
static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
	writel(0, addr);
}

static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
	int i;

	writel(0, addr);
	for (i = 0; i < 32; i++)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
}

static int mt8186_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Turn on the power of SCP's SRAM before using it. Enable one block at a time. */
	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

static int mt8192_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

		/* Power on L2TCM */
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	scp_cluster->l2tcm_refcnt += 1;

	mutex_unlock(&scp_cluster->cluster_lock);

	return 0;
}

static int mt8195_scp_before_load(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
{
	u32 sec_ctrl;
	struct mtk_scp *scp_c0;
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	scp->data->scp_reset_assert(scp);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);

	/*
	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
	 * on SRAM when SCP core 1 accesses SRAM.
	 *
	 * This configuration lets SCP core 0 and core 1 boot from different
	 * SRAM addresses, because both cores boot from the head of SRAM by
	 * default.  It must be configured before booting SCP core 1.
	 *
	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
	 * the address is increased by a fixed offset (L2TCM_OFFSET) on the bus.
	 * The shift is transparent to software.
	 */
	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);

	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);

	/* enable SRAM offset when fetching instruction and data */
	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);

	return 0;
}

static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	/* Hold SCP in reset while loading FW. */
	scp->data->scp_reset_assert(scp);

	ret = scp->data->scp_before_load(scp);
	if (ret < 0)
		goto leave;

	ret = scp_elf_load_segments(rproc, fw);
leave:
	clk_disable_unprepare(scp->clk);

	return ret;
}

static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	ret = scp_ipi_init(scp, fw);
	clk_disable_unprepare(scp->clk);
	return ret;
}

static int scp_start(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	struct scp_run *run = &scp->run;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	run->signaled = false;

	scp->data->scp_reset_deassert(scp);

	ret = wait_event_interruptible_timeout(
					run->wq,
					run->signaled,
					msecs_to_jiffies(2000));

	if (ret == 0) {
		dev_err(dev, "timed out waiting for SCP initialization!\n");
		ret = -ETIME;
		goto stop;
	}
	if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait for SCP initialization interrupted by a signal!\n");
		goto stop;
	}

	clk_disable_unprepare(scp->clk);
	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);

	return 0;

stop:
	scp->data->scp_reset_assert(scp);
	clk_disable_unprepare(scp->clk);
	return ret;
}

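/*
 * Device-address translation.  On MT8183 the SCP sees its SRAM at
 * device address 0, so addresses below sram_size map into the
 * ioremapped SRAM; anything else is looked up in the DMA region that
 * holds the firmware image.
 */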
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da < scp->sram_size) {
		offset = da;
		if (offset >= 0 && (offset + len) <= scp->sram_size)
			return (void __force *)scp->sram_base + offset;
	} else if (scp->dram_size) {
		offset = da - scp->dma_addr;
		if (offset >= 0 && (offset + len) <= scp->dram_size)
			return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da >= scp->sram_phys &&
	    (da + len) <= scp->sram_phys + scp->sram_size) {
		offset = da - scp->sram_phys;
		return (void __force *)scp->sram_base + offset;
	}

	/* optional memory region */
	if (scp->cluster->l1tcm_size &&
	    da >= scp->cluster->l1tcm_phys &&
	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
		offset = da - scp->cluster->l1tcm_phys;
		return (void __force *)scp->cluster->l1tcm_base + offset;
	}

	/* optional memory region */
	if (scp->dram_size &&
	    da >= scp->dma_addr &&
	    (da + len) <= scp->dma_addr + scp->dram_size) {
		offset = da - scp->dma_addr;
		return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct mtk_scp *scp = rproc->priv;

	return scp->data->scp_da_to_va(scp, da, len);
}

static void mt8183_scp_stop(struct mtk_scp *scp)
{
	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
}

static void mt8192_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt > 0)
		scp_cluster->l2tcm_refcnt -= 1;

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* Power off L2TCM */
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	mutex_unlock(&scp_cluster->cluster_lock);
}

static void mt8195_scp_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_c1_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	/* Power off CPU SRAM */
	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
}

static int scp_stop(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return ret;
	}

	scp->data->scp_reset_assert(scp);
	scp->data->scp_stop(scp);
	clk_disable_unprepare(scp->clk);

	return 0;
}

static const struct rproc_ops scp_ops = {
	.start		= scp_start,
	.stop		= scp_stop,
	.load		= scp_load,
	.da_to_va	= scp_da_to_va,
	.parse_fw	= scp_parse_fw,
	.sanity_check	= rproc_elf_sanity_check,
};

/**
 * scp_get_device() - get device struct of SCP
 *
 * @scp:	mtk_scp structure
 *
 * Return: the struct device of the SCP.
 **/
struct device *scp_get_device(struct mtk_scp *scp)
{
	return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);

/**
 * scp_get_rproc() - get rproc struct of SCP
 *
 * @scp:	mtk_scp structure
 *
 * Return: the struct rproc of the SCP.
 **/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
	return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);

/**
 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video decoder hardware capability
 **/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
	return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);

/**
 * scp_get_venc_hw_capa() - get video encoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video encoder hardware capability
 **/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
	return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);

/**
 * scp_mapping_dm_addr() - map SRAM/DRAM to a kernel virtual address
 *
 * @scp:	mtk_scp structure
 * @mem_addr:	memory address as seen by the SCP
 *
 * Map the SCP's SRAM address, DMEM (Data Extended Memory) address, or
 * working-buffer address to a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if the mapping failed, otherwise the mapped
 * kernel virtual address.
 **/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
	void *ptr;

	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
	if (!ptr)
		return ERR_PTR(-EINVAL);

	return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
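
/*
 * Example usage (editor's sketch; "msg" and its buf_addr field are
 * hypothetical names): a codec driver can turn an SCP-side buffer
 * address received over IPI into a CPU pointer.
 *
 *	void *va = scp_mapping_dm_addr(scp, msg->buf_addr);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */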

static int scp_map_memory_region(struct mtk_scp *scp)
{
	int ret;

	ret = of_reserved_mem_device_init(scp->dev);

	/* reserved memory is optional. */
	if (ret == -ENODEV) {
		dev_info(scp->dev, "skipping reserved memory initialization.");
		return 0;
	}

	if (ret) {
		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
		return -ENOMEM;
	}

	/* Reserved SCP code size */
	scp->dram_size = MAX_CODE_SIZE;
	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
					   &scp->dma_addr, GFP_KERNEL);
	if (!scp->cpu_addr)
		return -ENOMEM;

	return 0;
}

static void scp_unmap_memory_region(struct mtk_scp *scp)
{
	if (scp->dram_size == 0)
		return;

	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
			  scp->dma_addr);
	of_reserved_mem_device_release(scp->dev);
}

static int scp_register_ipi(struct platform_device *pdev, u32 id,
			    ipi_handler_t handler, void *priv)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_register(scp, id, handler, priv);
}

static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	scp_ipi_unregister(scp, id);
}

static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
			unsigned int len, unsigned int wait)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_send(scp, id, buf, len, wait);
}

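/*
 * Glue between the rproc core and the mtk_rpmsg transport: rpmsg
 * traffic is carried over SCP IPIs, with SCP_IPI_NS_SERVICE used for
 * name-service announcements from the firmware.
 */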
static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
	.send_ipi = scp_send_ipi,
	.register_ipi = scp_register_ipi,
	.unregister_ipi = scp_unregister_ipi,
	.ns_ipi_id = SCP_IPI_NS_SERVICE,
};

static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
	scp->rpmsg_subdev =
		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
					      &mtk_scp_rpmsg_info);
	if (scp->rpmsg_subdev)
		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}

static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
	if (scp->rpmsg_subdev) {
		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
		scp->rpmsg_subdev = NULL;
	}
}

static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
				      struct mtk_scp_of_cluster *scp_cluster,
				      const struct mtk_scp_of_data *of_data)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_scp *scp;
	struct rproc *rproc;
	struct resource *res;
	const char *fw_name = "scp.img";
	int ret, i;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ERR_PTR(ret);

	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
	if (!rproc) {
		dev_err(dev, "unable to allocate remoteproc\n");
		return ERR_PTR(-ENOMEM);
	}

	scp = rproc->priv;
	scp->rproc = rproc;
	scp->dev = dev;
	scp->data = of_data;
	scp->cluster = scp_cluster;
	platform_set_drvdata(pdev, scp);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	scp->sram_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp->sram_base)) {
		dev_err(dev, "Failed to parse and map sram memory\n");
		return ERR_CAST(scp->sram_base);
	}

	scp->sram_size = resource_size(res);
	scp->sram_phys = res->start;

	ret = scp->data->scp_clk_get(scp);
	if (ret)
		return ERR_PTR(ret);

	ret = scp_map_memory_region(scp);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&scp->send_lock);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_init(&scp->ipi_desc[i].lock);

	/* register SCP initialization IPI */
	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
	if (ret) {
		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
		goto release_dev_mem;
	}

	init_waitqueue_head(&scp->run.wq);
	init_waitqueue_head(&scp->ack_wq);

	scp_add_rpmsg_subdev(scp);

	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
					scp_irq_handler, IRQF_ONESHOT,
					pdev->name, scp);

	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto remove_subdev;
	}

	return scp;

remove_subdev:
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);

	return ERR_PTR(ret);
}

static void scp_free(struct mtk_scp *scp)
{
	int i;

	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);
}

static int scp_add_single_core(struct platform_device *pdev,
			       struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	struct mtk_scp *scp;
	int ret;

	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
	if (IS_ERR(scp))
		return PTR_ERR(scp);

	ret = rproc_add(scp->rproc);
	if (ret) {
		dev_err(dev, "Failed to add rproc\n");
		scp_free(scp);
		return ret;
	}

	list_add_tail(&scp->elem, scp_list);

	return 0;
}

static int scp_add_multi_core(struct platform_device *pdev,
			      struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct platform_device *cpdev;
	struct device_node *child;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	const struct mtk_scp_of_data **cluster_of_data;
	struct mtk_scp *scp, *temp;
	int core_id = 0;
	int ret;

	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);

	for_each_available_child_of_node(np, child) {
		if (!cluster_of_data[core_id]) {
			ret = -EINVAL;
			dev_err(dev, "Unsupported core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "No platform device found for core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
		put_device(&cpdev->dev);
		if (IS_ERR(scp)) {
			ret = PTR_ERR(scp);
			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		ret = rproc_add(scp->rproc);
		if (ret) {
			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
			of_node_put(child);
			scp_free(scp);
			goto init_fail;
		}

		list_add_tail(&scp->elem, scp_list);
		core_id++;
	}

	/*
	 * Set the driver data of @pdev to the last @scp that was created.
	 * This is needed because (1) scp_rproc_init() calls
	 * platform_set_drvdata() only on the child platform devices and
	 * (2) scp_remove() needs a handle to the cluster list.
	 */
	platform_set_drvdata(pdev, scp);

	return 0;

init_fail:
	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}

	return ret;
}

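/*
 * Single-core (legacy) bindings have no "mediatek,scp-core" children;
 * the cluster binding describes each core as one such child node.
 */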
static bool scp_is_single_core(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct device_node *child;
	int num_cores = 0;

	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "mediatek,scp-core"))
			num_cores++;

	return num_cores < 2;
}

static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
{
	int ret;

	if (scp_is_single_core(pdev))
		ret = scp_add_single_core(pdev, scp_cluster);
	else
		ret = scp_add_multi_core(pdev, scp_cluster);

	return ret;
}

static int scp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_scp_of_cluster *scp_cluster;
	struct resource *res;
	int ret;

	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
	if (!scp_cluster)
		return -ENOMEM;

	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(scp_cluster->reg_base))
		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
				     "Failed to parse and map cfg memory\n");

	/* l1tcm is an optional memory region */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
	scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp_cluster->l1tcm_base)) {
		ret = PTR_ERR(scp_cluster->l1tcm_base);
		if (ret != -EINVAL)
			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");

		scp_cluster->l1tcm_base = NULL;
	} else {
		scp_cluster->l1tcm_size = resource_size(res);
		scp_cluster->l1tcm_phys = res->start;
	}

	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
	mutex_init(&scp_cluster->cluster_lock);

	ret = devm_of_platform_populate(dev);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");

	ret = scp_cluster_init(pdev, scp_cluster);
	if (ret)
		return ret;

	return 0;
}

static void scp_remove(struct platform_device *pdev)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *temp;

	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}
	mutex_destroy(&scp_cluster->cluster_lock);
}

static const struct mtk_scp_of_data mt8183_of_data = {
	.scp_clk_get = mt8183_scp_clk_get,
	.scp_before_load = mt8183_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x7bdb0,
};

static const struct mtk_scp_of_data mt8186_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8186_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x3bdb0,
};

static const struct mtk_scp_of_data mt8188_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8192_of_data = {
	.scp_clk_get = mt8192_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8195_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_before_load,
	.scp_irq_handler = mt8195_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8195_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8195_of_data_c1 = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_c1_before_load,
	.scp_irq_handler = mt8195_scp_c1_irq_handler,
	.scp_reset_assert = mt8195_scp_c1_reset_assert,
	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
	.scp_stop = mt8195_scp_c1_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
};

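/*
 * Per-core configuration for the mt8195 dual-core match entry: the
 * array is NULL-terminated and indexed by core id in scp_add_multi_core().
 */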
static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
	&mt8195_of_data,
	&mt8195_of_data_c1,
	NULL
};

static const struct of_device_id mtk_scp_of_match[] = {
	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);

static struct platform_driver mtk_scp_driver = {
	.probe = scp_probe,
	.remove_new = scp_remove,
	.driver = {
		.name = "mtk-scp",
		.of_match_table = mtk_scp_of_match,
	},
};

module_platform_driver(mtk_scp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");