v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MediaTek PCIe host controller driver.
   4 *
   5 * Copyright (c) 2017 MediaTek Inc.
   6 * Author: Ryder Lee <ryder.lee@mediatek.com>
   7 *	   Honghui Zhang <honghui.zhang@mediatek.com>
   8 */
   9
  10#include <linux/clk.h>
  11#include <linux/delay.h>
  12#include <linux/iopoll.h>
  13#include <linux/irq.h>
  14#include <linux/irqchip/chained_irq.h>
  15#include <linux/irqdomain.h>
  16#include <linux/kernel.h>
  17#include <linux/msi.h>
  18#include <linux/module.h>
  19#include <linux/of_address.h>
  20#include <linux/of_pci.h>
  21#include <linux/of_platform.h>
  22#include <linux/pci.h>
  23#include <linux/phy/phy.h>
  24#include <linux/platform_device.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/reset.h>
  27
  28#include "../pci.h"
  29
  30/* PCIe shared registers */
  31#define PCIE_SYS_CFG		0x00
  32#define PCIE_INT_ENABLE		0x0c
  33#define PCIE_CFG_ADDR		0x20
  34#define PCIE_CFG_DATA		0x24
  35
   36/* PCIe per-port registers */
  37#define PCIE_BAR0_SETUP		0x10
  38#define PCIE_CLASS		0x34
  39#define PCIE_LINK_STATUS	0x50
  40
  41#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
  42#define PCIE_PORT_PERST(x)	BIT(1 + (x))
  43#define PCIE_PORT_LINKUP	BIT(0)
  44#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)
  45
  46#define PCIE_BAR_ENABLE		BIT(0)
  47#define PCIE_REVISION_ID	BIT(0)
  48#define PCIE_CLASS_CODE		(0x60400 << 8)
  49#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
  50				((((regn) >> 8) & GENMASK(3, 0)) << 24))
  51#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
  52#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
  53#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
  54#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
  55	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
  56	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
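/*
 * Worked example (hypothetical values, not from the original source): a
 * config access to register 0x10 (BAR0) on bus 1, device 1, function 0
 * programs PCIE_CFG_ADDR with
 *	PCIE_CONF_ADDR(0x10, 0, 1, 1)
 *		= 0x10 | (0 << 8) | (1 << 11) | (1 << 16) = 0x00010810,
 * with extended-config register bits [11:8] placed in bits [27:24].
 */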
  57
  58/* MediaTek specific configuration registers */
  59#define PCIE_FTS_NUM		0x70c
  60#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
  61#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)
  62
  63#define PCIE_FC_CREDIT		0x73c
  64#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
  65#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)
  66
   67/* PCIe V2 shared registers */
  68#define PCIE_SYS_CFG_V2		0x0
  69#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
  70#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)
  71
  72/* PCIe V2 per-port registers */
  73#define PCIE_MSI_VECTOR		0x0c0
  74
  75#define PCIE_CONF_VEND_ID	0x100
  76#define PCIE_CONF_DEVICE_ID	0x102
  77#define PCIE_CONF_CLASS_ID	0x106
  78
  79#define PCIE_INT_MASK		0x420
  80#define INTX_MASK		GENMASK(19, 16)
  81#define INTX_SHIFT		16
  82#define PCIE_INT_STATUS		0x424
  83#define MSI_STATUS		BIT(23)
  84#define PCIE_IMSI_STATUS	0x42c
  85#define PCIE_IMSI_ADDR		0x430
  86#define MSI_MASK		BIT(23)
  87#define MTK_MSI_IRQS_NUM	32
  88
  89#define PCIE_AHB_TRANS_BASE0_L	0x438
  90#define PCIE_AHB_TRANS_BASE0_H	0x43c
  91#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
  92#define PCIE_AXI_WINDOW0	0x448
  93#define WIN_ENABLE		BIT(7)
  94/*
   95 * Define the PCIe-to-AHB window size as 2^33 so that up to 8 GB of address
   96 * space can be translated, giving EP DMA access to at least 4 GB of DRAM
   97 * (physical DRAM starts at 0x40000000).
  98 */
  99#define PCIE2AHB_SIZE	0x21
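/*
 * Note: PCIE2AHB_SIZE is an exponent (0x21 = 33 decimal, i.e. the 2^33/8 GB
 * window described above). The outbound window is programmed the same way:
 * mtk_pcie_startup_port_v2() below fills AHB2PCIE_SIZE() with
 * fls(resource_size(mem)), e.g. fls(0x10000000) = 29 for a 256 MB window.
 */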
 100
 101/* PCIe V2 configuration transaction header */
 102#define PCIE_CFG_HEADER0	0x460
 103#define PCIE_CFG_HEADER1	0x464
 104#define PCIE_CFG_HEADER2	0x468
 105#define PCIE_CFG_WDATA		0x470
 106#define PCIE_APP_TLP_REQ	0x488
 107#define PCIE_CFG_RDATA		0x48c
 108#define APP_CFG_REQ		BIT(0)
 109#define APP_CPL_STATUS		GENMASK(7, 5)
 110
 111#define CFG_WRRD_TYPE_0		4
 112#define CFG_WR_FMT		2
 113#define CFG_RD_FMT		0
 114
 115#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
 116#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
 117#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
 118#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
 119#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
 120#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
 121#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
 122#define CFG_HEADER_DW0(type, fmt) \
 123	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
 124#define CFG_HEADER_DW1(where, size) \
 125	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
 126#define CFG_HEADER_DW2(regn, fun, dev, bus) \
 127	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
 128	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
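/*
 * Worked example (hypothetical values): a 4-byte Cfgrd of register 0x10 on
 * bus 1, device 0, function 0 is encoded as
 *	CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT) = 0x04000001
 *	CFG_HEADER_DW1(0x10, 4)                     = 0x0000000f
 *	CFG_HEADER_DW2(0x10, 0, 0, 1)               = 0x01000010
 * i.e. fmt/type/length, first-DW byte enables, and bus/dev/fn/register,
 * resembling the leading DWORDs of a standard PCIe configuration TLP header.
 */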
 129
 130#define PCIE_RST_CTRL		0x510
 131#define PCIE_PHY_RSTB		BIT(0)
 132#define PCIE_PIPE_SRSTB		BIT(1)
 133#define PCIE_MAC_SRSTB		BIT(2)
 134#define PCIE_CRSTB		BIT(3)
 135#define PCIE_PERSTB		BIT(8)
 136#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
 137#define PCIE_LINK_STATUS_V2	0x804
 138#define PCIE_PORT_LINKUP_V2	BIT(10)
 139
 140struct mtk_pcie_port;
 141
 142/**
 143 * struct mtk_pcie_soc - differentiate between host generations
 144 * @need_fix_class_id: whether this host's class ID needs to be fixed
 145 * @need_fix_device_id: whether this host's device ID needs to be fixed
 146 * @device_id: device ID value to program when the host's device ID needs fixing
 147 * @ops: pointer to configuration access functions
 148 * @startup: pointer to controller setting functions
 149 * @setup_irq: pointer to initialize IRQ functions
 150 */
 151struct mtk_pcie_soc {
 152	bool need_fix_class_id;
 153	bool need_fix_device_id;
 154	unsigned int device_id;
 155	struct pci_ops *ops;
 156	int (*startup)(struct mtk_pcie_port *port);
 157	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
 158};
 159
 160/**
 161 * struct mtk_pcie_port - PCIe port information
 162 * @base: IO mapped register base
 163 * @list: port list
 164 * @pcie: pointer to PCIe host info
 165 * @reset: pointer to port reset control
 166 * @sys_ck: pointer to transaction/data link layer clock
 167 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 168 *          and RC initiated MMIO access
 169 * @axi_ck: pointer to application layer MMIO channel operating clock
 170 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 171 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 172 * @obff_ck: pointer to OBFF functional block operating clock
 173 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 174 * @phy: pointer to PHY control block
 175 * @slot: port slot
 176 * @irq: GIC irq
 177 * @irq_domain: legacy INTx IRQ domain
 178 * @inner_domain: inner IRQ domain
 179 * @msi_domain: MSI IRQ domain
 180 * @lock: protect the msi_irq_in_use bitmap
 181 * @msi_irq_in_use: bit map for assigned MSI IRQ
 182 */
 183struct mtk_pcie_port {
 184	void __iomem *base;
 185	struct list_head list;
 186	struct mtk_pcie *pcie;
 187	struct reset_control *reset;
 188	struct clk *sys_ck;
 189	struct clk *ahb_ck;
 190	struct clk *axi_ck;
 191	struct clk *aux_ck;
 192	struct clk *obff_ck;
 193	struct clk *pipe_ck;
 194	struct phy *phy;
 195	u32 slot;
 196	int irq;
 197	struct irq_domain *irq_domain;
 198	struct irq_domain *inner_domain;
 199	struct irq_domain *msi_domain;
 200	struct mutex lock;
 201	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
 202};
 203
 204/**
 205 * struct mtk_pcie - PCIe host information
 206 * @dev: pointer to PCIe device
 207 * @base: IO mapped register base
 208 * @free_ck: free-run reference clock
 209 * @mem: non-prefetchable memory resource
 210 * @ports: pointer to PCIe port information
 211 * @soc: pointer to SoC-dependent operations
 212 */
 213struct mtk_pcie {
 214	struct device *dev;
 215	void __iomem *base;
 216	struct clk *free_ck;
 217
 218	struct list_head ports;
 219	const struct mtk_pcie_soc *soc;
 220};
 221
 222static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
 223{
 224	struct device *dev = pcie->dev;
 225
 226	clk_disable_unprepare(pcie->free_ck);
 227
 228	pm_runtime_put_sync(dev);
 229	pm_runtime_disable(dev);
 230}
 231
 232static void mtk_pcie_port_free(struct mtk_pcie_port *port)
 233{
 234	struct mtk_pcie *pcie = port->pcie;
 235	struct device *dev = pcie->dev;
 236
 237	devm_iounmap(dev, port->base);
 238	list_del(&port->list);
 239	devm_kfree(dev, port);
 240}
 241
 242static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
 243{
 244	struct mtk_pcie_port *port, *tmp;
 245
 246	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
 247		phy_power_off(port->phy);
 248		phy_exit(port->phy);
 249		clk_disable_unprepare(port->pipe_ck);
 250		clk_disable_unprepare(port->obff_ck);
 251		clk_disable_unprepare(port->axi_ck);
 252		clk_disable_unprepare(port->aux_ck);
 253		clk_disable_unprepare(port->ahb_ck);
 254		clk_disable_unprepare(port->sys_ck);
 255		mtk_pcie_port_free(port);
 256	}
 257
 258	mtk_pcie_subsys_powerdown(pcie);
 259}
 260
 261static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
 262{
 263	u32 val;
 264	int err;
 265
 266	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
 267					!(val & APP_CFG_REQ), 10,
 268					100 * USEC_PER_MSEC);
 269	if (err)
 270		return PCIBIOS_SET_FAILED;
 271
 272	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
 273		return PCIBIOS_SET_FAILED;
 274
 275	return PCIBIOS_SUCCESSFUL;
 276}
 277
 278static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
 279			      int where, int size, u32 *val)
 280{
 281	u32 tmp;
 282
 283	/* Write PCIe configuration transaction header for Cfgrd */
 284	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
 285	       port->base + PCIE_CFG_HEADER0);
 286	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
 287	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
 288	       port->base + PCIE_CFG_HEADER2);
 289
 290	/* Trigger h/w to transmit Cfgrd TLP */
 291	tmp = readl(port->base + PCIE_APP_TLP_REQ);
 292	tmp |= APP_CFG_REQ;
 293	writel(tmp, port->base + PCIE_APP_TLP_REQ);
 294
 295	/* Check completion status */
 296	if (mtk_pcie_check_cfg_cpld(port))
 297		return PCIBIOS_SET_FAILED;
 298
 299	/* Read cpld payload of Cfgrd */
 300	*val = readl(port->base + PCIE_CFG_RDATA);
 301
 302	if (size == 1)
 303		*val = (*val >> (8 * (where & 3))) & 0xff;
 304	else if (size == 2)
 305		*val = (*val >> (8 * (where & 3))) & 0xffff;
 306
 307	return PCIBIOS_SUCCESSFUL;
 308}
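/*
 * Example usage (hypothetical values): reading the 16-bit vendor ID of the
 * device behind this port could look like
 *	mtk_pcie_hw_rd_cfg(port, 1, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &val);
 * the completion payload lands in PCIE_CFG_RDATA and is shifted/masked to
 * the requested width as shown above.
 */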
 309
 310static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
 311			      int where, int size, u32 val)
 312{
 313	/* Write PCIe configuration transaction header for Cfgwr */
 314	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
 315	       port->base + PCIE_CFG_HEADER0);
 316	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
 317	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
 318	       port->base + PCIE_CFG_HEADER2);
 319
 320	/* Write Cfgwr data */
 321	val = val << 8 * (where & 3);
 322	writel(val, port->base + PCIE_CFG_WDATA);
 323
 324	/* Trigger h/w to transmit Cfgwr TLP */
 325	val = readl(port->base + PCIE_APP_TLP_REQ);
 326	val |= APP_CFG_REQ;
 327	writel(val, port->base + PCIE_APP_TLP_REQ);
 328
 329	/* Check completion status */
 330	return mtk_pcie_check_cfg_cpld(port);
 331}
 332
 333static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
 334						unsigned int devfn)
 335{
 336	struct mtk_pcie *pcie = bus->sysdata;
 337	struct mtk_pcie_port *port;
 338	struct pci_dev *dev = NULL;
 339
 340	/*
 341	 * Walk the bus hierarchy to get the devfn value
 342	 * of the port in the root bus.
 343	 */
 344	while (bus && bus->number) {
 345		dev = bus->self;
 346		bus = dev->bus;
 347		devfn = dev->devfn;
 348	}
 349
 350	list_for_each_entry(port, &pcie->ports, list)
 351		if (port->slot == PCI_SLOT(devfn))
 352			return port;
 353
 354	return NULL;
 355}
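/*
 * For example (assuming a typical topology), an endpoint at 01:00.0 sits
 * behind the root port at 00:01.0: the loop above climbs to that bridge on
 * bus 0, so devfn becomes 0x08 and PCI_SLOT(devfn) = 1 selects port 1.
 */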
 356
 357static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 358				int where, int size, u32 *val)
 359{
 360	struct mtk_pcie_port *port;
 361	u32 bn = bus->number;
 362	int ret;
 363
 364	port = mtk_pcie_find_port(bus, devfn);
 365	if (!port) {
 366		*val = ~0;
 367		return PCIBIOS_DEVICE_NOT_FOUND;
 368	}
 369
 370	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
 371	if (ret)
 372		*val = ~0;
 373
 374	return ret;
 375}
 376
 377static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 378				 int where, int size, u32 val)
 379{
 380	struct mtk_pcie_port *port;
 381	u32 bn = bus->number;
 382
 383	port = mtk_pcie_find_port(bus, devfn);
 384	if (!port)
 385		return PCIBIOS_DEVICE_NOT_FOUND;
 386
 387	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
 388}
 389
 390static struct pci_ops mtk_pcie_ops_v2 = {
 391	.read  = mtk_pcie_config_read,
 392	.write = mtk_pcie_config_write,
 393};
 394
 395static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 396{
 397	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 398	phys_addr_t addr;
 399
 400	/* MT2712/MT7622 only support 32-bit MSI addresses */
 401	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
 402	msg->address_hi = 0;
 403	msg->address_lo = lower_32_bits(addr);
 404
 405	msg->data = data->hwirq;
 406
 407	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
 408		(int)data->hwirq, msg->address_hi, msg->address_lo);
 409}
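/*
 * In other words, the MSI doorbell is the physical address of this port's
 * PCIE_MSI_VECTOR register and the payload is the hwirq number: an
 * endpoint's MSI write of, say, data 5 to that address raises hwirq 5.
 * mtk_pcie_enable_msi() below programs the same address into PCIE_IMSI_ADDR.
 */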
 410
 411static int mtk_msi_set_affinity(struct irq_data *irq_data,
 412				const struct cpumask *mask, bool force)
 413{
 414	 return -EINVAL;
 415}
 416
 417static void mtk_msi_ack_irq(struct irq_data *data)
 418{
 419	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 420	u32 hwirq = data->hwirq;
 421
 422	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
 423}
 424
 425static struct irq_chip mtk_msi_bottom_irq_chip = {
 426	.name			= "MTK MSI",
 427	.irq_compose_msi_msg	= mtk_compose_msi_msg,
 428	.irq_set_affinity	= mtk_msi_set_affinity,
 429	.irq_ack		= mtk_msi_ack_irq,
 430};
 431
 432static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 433				     unsigned int nr_irqs, void *args)
 434{
 435	struct mtk_pcie_port *port = domain->host_data;
 436	unsigned long bit;
 437
 438	WARN_ON(nr_irqs != 1);
 439	mutex_lock(&port->lock);
 440
 441	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
 442	if (bit >= MTK_MSI_IRQS_NUM) {
 443		mutex_unlock(&port->lock);
 444		return -ENOSPC;
 445	}
 446
 447	__set_bit(bit, port->msi_irq_in_use);
 448
 449	mutex_unlock(&port->lock);
 450
 451	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
 452			    domain->host_data, handle_edge_irq,
 453			    NULL, NULL);
 454
 455	return 0;
 456}
 457
 458static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
 459				     unsigned int virq, unsigned int nr_irqs)
 460{
 461	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 462	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
 463
 464	mutex_lock(&port->lock);
 465
 466	if (!test_bit(d->hwirq, port->msi_irq_in_use))
 467		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
 468			d->hwirq);
 469	else
 470		__clear_bit(d->hwirq, port->msi_irq_in_use);
 471
 472	mutex_unlock(&port->lock);
 473
 474	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 475}
 476
 477static const struct irq_domain_ops msi_domain_ops = {
 478	.alloc	= mtk_pcie_irq_domain_alloc,
 479	.free	= mtk_pcie_irq_domain_free,
 480};
 481
 482static struct irq_chip mtk_msi_irq_chip = {
 483	.name		= "MTK PCIe MSI",
 484	.irq_ack	= irq_chip_ack_parent,
 485	.irq_mask	= pci_msi_mask_irq,
 486	.irq_unmask	= pci_msi_unmask_irq,
 487};
 488
 489static struct msi_domain_info mtk_msi_domain_info = {
 490	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 491		   MSI_FLAG_PCI_MSIX),
 492	.chip	= &mtk_msi_irq_chip,
 493};
 494
 495static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
 496{
 497	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
 498
 499	mutex_init(&port->lock);
 500
 501	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
 502						      &msi_domain_ops, port);
 503	if (!port->inner_domain) {
 504		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
 505		return -ENOMEM;
 506	}
 507
 508	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
 509						     port->inner_domain);
 510	if (!port->msi_domain) {
 511		dev_err(port->pcie->dev, "failed to create MSI domain\n");
 512		irq_domain_remove(port->inner_domain);
 513		return -ENOMEM;
 514	}
 515
 516	return 0;
 517}
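/*
 * The result is a two-level hierarchy: inner_domain maps hwirqs 0..31 onto
 * the msi_irq_in_use bitmap and mtk_msi_bottom_irq_chip, while the PCI MSI
 * domain stacked on top of it is the one endpoint drivers allocate their
 * MSI vectors from.
 */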
 518
 519static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
 520{
 521	u32 val;
 522	phys_addr_t msg_addr;
 523
 524	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
 525	val = lower_32_bits(msg_addr);
 526	writel(val, port->base + PCIE_IMSI_ADDR);
 527
 528	val = readl(port->base + PCIE_INT_MASK);
 529	val &= ~MSI_MASK;
 530	writel(val, port->base + PCIE_INT_MASK);
 531}
 532
 533static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
 534{
 535	struct mtk_pcie_port *port, *tmp;
 536
 537	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
 538		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
 539
 540		if (port->irq_domain)
 541			irq_domain_remove(port->irq_domain);
 542
 543		if (IS_ENABLED(CONFIG_PCI_MSI)) {
 544			if (port->msi_domain)
 545				irq_domain_remove(port->msi_domain);
 546			if (port->inner_domain)
 547				irq_domain_remove(port->inner_domain);
 548		}
 549
 550		irq_dispose_mapping(port->irq);
 551	}
 552}
 553
 554static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 555			     irq_hw_number_t hwirq)
 556{
 557	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 558	irq_set_chip_data(irq, domain->host_data);
 559
 560	return 0;
 561}
 562
 563static const struct irq_domain_ops intx_domain_ops = {
 564	.map = mtk_pcie_intx_map,
 565};
 566
 567static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
 568				    struct device_node *node)
 569{
 570	struct device *dev = port->pcie->dev;
 571	struct device_node *pcie_intc_node;
 572	int ret;
 573
 574	/* Setup INTx */
 575	pcie_intc_node = of_get_next_child(node, NULL);
 576	if (!pcie_intc_node) {
 577		dev_err(dev, "no PCIe Intc node found\n");
 578		return -ENODEV;
 579	}
 580
 581	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 582						 &intx_domain_ops, port);
 583	of_node_put(pcie_intc_node);
 584	if (!port->irq_domain) {
 585		dev_err(dev, "failed to get INTx IRQ domain\n");
 586		return -ENODEV;
 587	}
 588
 589	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 590		ret = mtk_pcie_allocate_msi_domains(port);
 591		if (ret)
 592			return ret;
 593	}
 594
 595	return 0;
 596}
 597
 598static void mtk_pcie_intr_handler(struct irq_desc *desc)
 599{
 600	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
 601	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 602	unsigned long status;
 603	u32 virq;
 604	u32 bit = INTX_SHIFT;
 605
 606	chained_irq_enter(irqchip, desc);
 607
 608	status = readl(port->base + PCIE_INT_STATUS);
 609	if (status & INTX_MASK) {
 610		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
 611			/* Clear the INTx */
 612			writel(1 << bit, port->base + PCIE_INT_STATUS);
 613			virq = irq_find_mapping(port->irq_domain,
 614						bit - INTX_SHIFT);
 615			generic_handle_irq(virq);
 616		}
 617	}
 618
 619	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 620		if (status & MSI_STATUS){
 621			unsigned long imsi_status;
 622
 623			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
 624				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
 625					virq = irq_find_mapping(port->inner_domain, bit);
 626					generic_handle_irq(virq);
 627				}
 628			}
 629			/* Clear MSI interrupt status */
 630			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
 631		}
 632	}
 633
 634	chained_irq_exit(irqchip, desc);
 635}
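/*
 * Status layout used above (from the macros at the top of the file):
 * PCIE_INT_STATUS bits 19:16 are INTA..INTD (hence INTX_SHIFT), bit 23 is
 * the MSI summary status, and the individual MSI vectors are read from and
 * acked in PCIE_IMSI_STATUS, one bit per hwirq 0..31.
 */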
 636
 637static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
 638			      struct device_node *node)
 639{
 640	struct mtk_pcie *pcie = port->pcie;
 641	struct device *dev = pcie->dev;
 642	struct platform_device *pdev = to_platform_device(dev);
 643	int err;
 644
 645	err = mtk_pcie_init_irq_domain(port, node);
 646	if (err) {
 647		dev_err(dev, "failed to init PCIe IRQ domain\n");
 648		return err;
 649	}
 650
 651	port->irq = platform_get_irq(pdev, port->slot);
 652	if (port->irq < 0)
 653		return port->irq;
 654
 655	irq_set_chained_handler_and_data(port->irq,
 656					 mtk_pcie_intr_handler, port);
 657
 658	return 0;
 659}
 660
 661static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
 662{
 663	struct mtk_pcie *pcie = port->pcie;
 664	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 665	struct resource *mem = NULL;
 666	struct resource_entry *entry;
 667	const struct mtk_pcie_soc *soc = port->pcie->soc;
 668	u32 val;
 669	int err;
 670
 671	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
 672	if (entry)
 673		mem = entry->res;
 674	if (!mem)
 675		return -EINVAL;
 676
 677	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
 678	if (pcie->base) {
 679		val = readl(pcie->base + PCIE_SYS_CFG_V2);
 680		val |= PCIE_CSR_LTSSM_EN(port->slot) |
 681		       PCIE_CSR_ASPM_L1_EN(port->slot);
 682		writel(val, pcie->base + PCIE_SYS_CFG_V2);
 683	}
 684
 685	/* Assert all reset signals */
 686	writel(0, port->base + PCIE_RST_CTRL);
 687
 688	/*
  689	 * Enable PCIe link-down reset: if the link status changes from link up
  690	 * to link down, the MAC control registers and configuration space are
  691	 * reset.
 692	 */
 693	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
 694
 695	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
 696	val = readl(port->base + PCIE_RST_CTRL);
 697	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
 698	       PCIE_MAC_SRSTB | PCIE_CRSTB;
 699	writel(val, port->base + PCIE_RST_CTRL);
 700
 701	/* Set up vendor ID and class code */
 702	if (soc->need_fix_class_id) {
 703		val = PCI_VENDOR_ID_MEDIATEK;
 704		writew(val, port->base + PCIE_CONF_VEND_ID);
 705
 706		val = PCI_CLASS_BRIDGE_PCI;
 707		writew(val, port->base + PCIE_CONF_CLASS_ID);
 708	}
 709
 710	if (soc->need_fix_device_id)
 711		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
 712
 713	/* 100ms timeout value should be enough for Gen1/2 training */
 714	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
 715				 !!(val & PCIE_PORT_LINKUP_V2), 20,
 716				 100 * USEC_PER_MSEC);
 717	if (err)
 718		return -ETIMEDOUT;
 719
 720	/* Set INTx mask */
 721	val = readl(port->base + PCIE_INT_MASK);
 722	val &= ~INTX_MASK;
 723	writel(val, port->base + PCIE_INT_MASK);
 724
 725	if (IS_ENABLED(CONFIG_PCI_MSI))
 726		mtk_pcie_enable_msi(port);
 727
 728	/* Set AHB to PCIe translation windows */
 729	val = lower_32_bits(mem->start) |
 730	      AHB2PCIE_SIZE(fls(resource_size(mem)));
 731	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
 732
 733	val = upper_32_bits(mem->start);
 734	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
 735
 736	/* Set PCIe to AXI translation memory space.*/
 737	val = PCIE2AHB_SIZE | WIN_ENABLE;
 738	writel(val, port->base + PCIE_AXI_WINDOW0);
 739
 740	return 0;
 741}
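/*
 * Window programming example (hypothetical values): for an MMIO window at
 * 0x20000000 of size 0x10000000, the code above writes
 *	PCIE_AHB_TRANS_BASE0_L = 0x20000000 | AHB2PCIE_SIZE(29) = 0x2000001d
 *	PCIE_AHB_TRANS_BASE0_H = 0x0
 *	PCIE_AXI_WINDOW0       = PCIE2AHB_SIZE | WIN_ENABLE = 0xa1
 */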
 742
 743static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
 744				      unsigned int devfn, int where)
 745{
 746	struct mtk_pcie *pcie = bus->sysdata;
 747
 748	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
 749			      bus->number), pcie->base + PCIE_CFG_ADDR);
 750
 751	return pcie->base + PCIE_CFG_DATA + (where & 3);
 752}
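/*
 * Note: on this generation a single address/data register pair is shared,
 * so map_bus() latches the target in PCIE_CFG_ADDR and the generic
 * accessors below then read or write PCIE_CFG_DATA directly, offset by
 * (where & 3) for sub-word accesses.
 */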
 753
 754static struct pci_ops mtk_pcie_ops = {
 755	.map_bus = mtk_pcie_map_bus,
 756	.read  = pci_generic_config_read,
 757	.write = pci_generic_config_write,
 758};
 759
 760static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
 761{
 762	struct mtk_pcie *pcie = port->pcie;
 763	u32 func = PCI_FUNC(port->slot << 3);
 764	u32 slot = PCI_SLOT(port->slot << 3);
 765	u32 val;
 766	int err;
 767
 768	/* assert port PERST_N */
 769	val = readl(pcie->base + PCIE_SYS_CFG);
 770	val |= PCIE_PORT_PERST(port->slot);
 771	writel(val, pcie->base + PCIE_SYS_CFG);
 772
 773	/* de-assert port PERST_N */
 774	val = readl(pcie->base + PCIE_SYS_CFG);
 775	val &= ~PCIE_PORT_PERST(port->slot);
 776	writel(val, pcie->base + PCIE_SYS_CFG);
 777
 778	/* 100ms timeout value should be enough for Gen1/2 training */
 779	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
 780				 !!(val & PCIE_PORT_LINKUP), 20,
 781				 100 * USEC_PER_MSEC);
 782	if (err)
 783		return -ETIMEDOUT;
 784
 785	/* enable interrupt */
 786	val = readl(pcie->base + PCIE_INT_ENABLE);
 787	val |= PCIE_PORT_INT_EN(port->slot);
 788	writel(val, pcie->base + PCIE_INT_ENABLE);
 789
  790	/* Map BAR0 to the whole DDR region; this must be set before config accesses. */
 791	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
 792	       port->base + PCIE_BAR0_SETUP);
 793
 794	/* configure class code and revision ID */
 795	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
 796
 797	/* configure FC credit */
 798	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
 799	       pcie->base + PCIE_CFG_ADDR);
 800	val = readl(pcie->base + PCIE_CFG_DATA);
 801	val &= ~PCIE_FC_CREDIT_MASK;
 802	val |= PCIE_FC_CREDIT_VAL(0x806c);
 803	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
 804	       pcie->base + PCIE_CFG_ADDR);
 805	writel(val, pcie->base + PCIE_CFG_DATA);
 806
 807	/* configure RC FTS number to 250 when it leaves L0s */
 808	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
 809	       pcie->base + PCIE_CFG_ADDR);
 810	val = readl(pcie->base + PCIE_CFG_DATA);
 811	val &= ~PCIE_FTS_NUM_MASK;
 812	val |= PCIE_FTS_NUM_L0(0x50);
 813	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
 814	       pcie->base + PCIE_CFG_ADDR);
 815	writel(val, pcie->base + PCIE_CFG_DATA);
 816
 817	return 0;
 818}
 819
 820static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
 821{
 822	struct mtk_pcie *pcie = port->pcie;
 823	struct device *dev = pcie->dev;
 824	int err;
 825
 826	err = clk_prepare_enable(port->sys_ck);
 827	if (err) {
 828		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
 829		goto err_sys_clk;
 830	}
 831
 832	err = clk_prepare_enable(port->ahb_ck);
 833	if (err) {
 834		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
 835		goto err_ahb_clk;
 836	}
 837
 838	err = clk_prepare_enable(port->aux_ck);
 839	if (err) {
 840		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
 841		goto err_aux_clk;
 842	}
 843
 844	err = clk_prepare_enable(port->axi_ck);
 845	if (err) {
 846		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
 847		goto err_axi_clk;
 848	}
 849
 850	err = clk_prepare_enable(port->obff_ck);
 851	if (err) {
 852		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
 853		goto err_obff_clk;
 854	}
 855
 856	err = clk_prepare_enable(port->pipe_ck);
 857	if (err) {
 858		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
 859		goto err_pipe_clk;
 860	}
 861
 862	reset_control_assert(port->reset);
 863	reset_control_deassert(port->reset);
 864
 865	err = phy_init(port->phy);
 866	if (err) {
 867		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
 868		goto err_phy_init;
 869	}
 870
 871	err = phy_power_on(port->phy);
 872	if (err) {
 873		dev_err(dev, "failed to power on port%d phy\n", port->slot);
 874		goto err_phy_on;
 875	}
 876
 877	if (!pcie->soc->startup(port))
 878		return;
 879
 880	dev_info(dev, "Port%d link down\n", port->slot);
 881
 882	phy_power_off(port->phy);
 883err_phy_on:
 884	phy_exit(port->phy);
 885err_phy_init:
 886	clk_disable_unprepare(port->pipe_ck);
 887err_pipe_clk:
 888	clk_disable_unprepare(port->obff_ck);
 889err_obff_clk:
 890	clk_disable_unprepare(port->axi_ck);
 891err_axi_clk:
 892	clk_disable_unprepare(port->aux_ck);
 893err_aux_clk:
 894	clk_disable_unprepare(port->ahb_ck);
 895err_ahb_clk:
 896	clk_disable_unprepare(port->sys_ck);
 897err_sys_clk:
 898	mtk_pcie_port_free(port);
 899}
 900
 901static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
 902			       struct device_node *node,
 903			       int slot)
 904{
 905	struct mtk_pcie_port *port;
 906	struct device *dev = pcie->dev;
 907	struct platform_device *pdev = to_platform_device(dev);
 908	char name[10];
 909	int err;
 910
 911	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
 912	if (!port)
 913		return -ENOMEM;
 914
 915	snprintf(name, sizeof(name), "port%d", slot);
 916	port->base = devm_platform_ioremap_resource_byname(pdev, name);
 917	if (IS_ERR(port->base)) {
 918		dev_err(dev, "failed to map port%d base\n", slot);
 919		return PTR_ERR(port->base);
 920	}
 921
 922	snprintf(name, sizeof(name), "sys_ck%d", slot);
 923	port->sys_ck = devm_clk_get(dev, name);
 924	if (IS_ERR(port->sys_ck)) {
 925		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
 926		return PTR_ERR(port->sys_ck);
 927	}
 928
 929	/* sys_ck might be divided into the following parts in some chips */
 930	snprintf(name, sizeof(name), "ahb_ck%d", slot);
 931	port->ahb_ck = devm_clk_get_optional(dev, name);
 932	if (IS_ERR(port->ahb_ck))
 933		return PTR_ERR(port->ahb_ck);
 934
 935	snprintf(name, sizeof(name), "axi_ck%d", slot);
 936	port->axi_ck = devm_clk_get_optional(dev, name);
 937	if (IS_ERR(port->axi_ck))
 938		return PTR_ERR(port->axi_ck);
 939
 940	snprintf(name, sizeof(name), "aux_ck%d", slot);
 941	port->aux_ck = devm_clk_get_optional(dev, name);
 942	if (IS_ERR(port->aux_ck))
 943		return PTR_ERR(port->aux_ck);
 944
 945	snprintf(name, sizeof(name), "obff_ck%d", slot);
 946	port->obff_ck = devm_clk_get_optional(dev, name);
 947	if (IS_ERR(port->obff_ck))
 948		return PTR_ERR(port->obff_ck);
 949
 950	snprintf(name, sizeof(name), "pipe_ck%d", slot);
 951	port->pipe_ck = devm_clk_get_optional(dev, name);
 952	if (IS_ERR(port->pipe_ck))
 953		return PTR_ERR(port->pipe_ck);
 954
 955	snprintf(name, sizeof(name), "pcie-rst%d", slot);
 956	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
 957	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
 958		return PTR_ERR(port->reset);
 959
 960	/* some platforms may use default PHY setting */
 961	snprintf(name, sizeof(name), "pcie-phy%d", slot);
 962	port->phy = devm_phy_optional_get(dev, name);
 963	if (IS_ERR(port->phy))
 964		return PTR_ERR(port->phy);
 965
 966	port->slot = slot;
 967	port->pcie = pcie;
 968
 969	if (pcie->soc->setup_irq) {
 970		err = pcie->soc->setup_irq(port, node);
 971		if (err)
 972			return err;
 973	}
 974
 975	INIT_LIST_HEAD(&port->list);
 976	list_add_tail(&port->list, &pcie->ports);
 977
 978	return 0;
 979}
 980
 981static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
 982{
 983	struct device *dev = pcie->dev;
 984	struct platform_device *pdev = to_platform_device(dev);
 985	struct resource *regs;
 986	int err;
 987
 988	/* get shared registers, which are optional */
 989	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
 990	if (regs) {
 991		pcie->base = devm_ioremap_resource(dev, regs);
 992		if (IS_ERR(pcie->base)) {
 993			dev_err(dev, "failed to map shared register\n");
 994			return PTR_ERR(pcie->base);
 995		}
 996	}
 997
 998	pcie->free_ck = devm_clk_get(dev, "free_ck");
 999	if (IS_ERR(pcie->free_ck)) {
1000		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
1001			return -EPROBE_DEFER;
1002
1003		pcie->free_ck = NULL;
1004	}
1005
1006	pm_runtime_enable(dev);
1007	pm_runtime_get_sync(dev);
1008
1009	/* enable top level clock */
1010	err = clk_prepare_enable(pcie->free_ck);
1011	if (err) {
1012		dev_err(dev, "failed to enable free_ck\n");
1013		goto err_free_ck;
1014	}
1015
1016	return 0;
1017
1018err_free_ck:
1019	pm_runtime_put_sync(dev);
1020	pm_runtime_disable(dev);
1021
1022	return err;
1023}
1024
1025static int mtk_pcie_setup(struct mtk_pcie *pcie)
1026{
1027	struct device *dev = pcie->dev;
1028	struct device_node *node = dev->of_node, *child;
1029	struct mtk_pcie_port *port, *tmp;
1030	int err;
1031
1032	for_each_available_child_of_node(node, child) {
1033		int slot;
1034
1035		err = of_pci_get_devfn(child);
1036		if (err < 0) {
1037			dev_err(dev, "failed to parse devfn: %d\n", err);
1038			return err;
1039		}
1040
1041		slot = PCI_SLOT(err);
1042
1043		err = mtk_pcie_parse_port(pcie, child, slot);
1044		if (err)
1045			return err;
1046	}
1047
1048	err = mtk_pcie_subsys_powerup(pcie);
1049	if (err)
1050		return err;
1051
1052	/* enable each port, and then check link status */
1053	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1054		mtk_pcie_enable_port(port);
1055
1056	/* power down PCIe subsys if slots are all empty (link down) */
1057	if (list_empty(&pcie->ports))
1058		mtk_pcie_subsys_powerdown(pcie);
1059
 1060	return 0;
 1061}
1062
1063static int mtk_pcie_probe(struct platform_device *pdev)
1064{
1065	struct device *dev = &pdev->dev;
1066	struct mtk_pcie *pcie;
1067	struct pci_host_bridge *host;
1068	int err;
1069
1070	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
1071	if (!host)
1072		return -ENOMEM;
1073
1074	pcie = pci_host_bridge_priv(host);
1075
1076	pcie->dev = dev;
1077	pcie->soc = of_device_get_match_data(dev);
1078	platform_set_drvdata(pdev, pcie);
1079	INIT_LIST_HEAD(&pcie->ports);
1080
1081	err = mtk_pcie_setup(pcie);
1082	if (err)
1083		return err;
1084
1085	host->ops = pcie->soc->ops;
 1086	host->sysdata = pcie;
 1087
1088	err = pci_host_probe(host);
1089	if (err)
1090		goto put_resources;
1091
1092	return 0;
1093
1094put_resources:
1095	if (!list_empty(&pcie->ports))
1096		mtk_pcie_put_resources(pcie);
1097
1098	return err;
1099}
1100
1101
1102static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
1103{
1104	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1105	struct list_head *windows = &host->windows;
1106
1107	pci_free_resource_list(windows);
1108}
1109
1110static int mtk_pcie_remove(struct platform_device *pdev)
1111{
1112	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
1113	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1114
1115	pci_stop_root_bus(host->bus);
1116	pci_remove_root_bus(host->bus);
1117	mtk_pcie_free_resources(pcie);
1118
1119	mtk_pcie_irq_teardown(pcie);
1120
1121	mtk_pcie_put_resources(pcie);
1122
1123	return 0;
1124}
1125
1126static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
1127{
1128	struct mtk_pcie *pcie = dev_get_drvdata(dev);
1129	struct mtk_pcie_port *port;
1130
1131	if (list_empty(&pcie->ports))
1132		return 0;
1133
1134	list_for_each_entry(port, &pcie->ports, list) {
1135		clk_disable_unprepare(port->pipe_ck);
1136		clk_disable_unprepare(port->obff_ck);
1137		clk_disable_unprepare(port->axi_ck);
1138		clk_disable_unprepare(port->aux_ck);
1139		clk_disable_unprepare(port->ahb_ck);
1140		clk_disable_unprepare(port->sys_ck);
1141		phy_power_off(port->phy);
1142		phy_exit(port->phy);
1143	}
1144
1145	clk_disable_unprepare(pcie->free_ck);
1146
1147	return 0;
1148}
1149
1150static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
1151{
1152	struct mtk_pcie *pcie = dev_get_drvdata(dev);
1153	struct mtk_pcie_port *port, *tmp;
1154
1155	if (list_empty(&pcie->ports))
1156		return 0;
1157
1158	clk_prepare_enable(pcie->free_ck);
1159
1160	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1161		mtk_pcie_enable_port(port);
1162
 1163	/* In case the EP was removed while the system was suspended. */
1164	if (list_empty(&pcie->ports))
1165		clk_disable_unprepare(pcie->free_ck);
1166
1167	return 0;
1168}
1169
1170static const struct dev_pm_ops mtk_pcie_pm_ops = {
1171	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
1172				      mtk_pcie_resume_noirq)
1173};
1174
 1175static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
 1176	.ops = &mtk_pcie_ops,
1177	.startup = mtk_pcie_startup_port,
1178};
1179
1180static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
1181	.ops = &mtk_pcie_ops_v2,
1182	.startup = mtk_pcie_startup_port_v2,
1183	.setup_irq = mtk_pcie_setup_irq,
1184};
1185
1186static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
1187	.need_fix_class_id = true,
1188	.ops = &mtk_pcie_ops_v2,
1189	.startup = mtk_pcie_startup_port_v2,
1190	.setup_irq = mtk_pcie_setup_irq,
1191};
1192
1193static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
1194	.need_fix_class_id = true,
1195	.need_fix_device_id = true,
1196	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
1197	.ops = &mtk_pcie_ops_v2,
1198	.startup = mtk_pcie_startup_port_v2,
1199	.setup_irq = mtk_pcie_setup_irq,
1200};
1201
1202static const struct of_device_id mtk_pcie_ids[] = {
1203	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
1204	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
1205	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
1206	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
1207	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
1208	{},
1209};
 
1210
1211static struct platform_driver mtk_pcie_driver = {
1212	.probe = mtk_pcie_probe,
1213	.remove = mtk_pcie_remove,
1214	.driver = {
1215		.name = "mtk-pcie",
1216		.of_match_table = mtk_pcie_ids,
1217		.suppress_bind_attrs = true,
1218		.pm = &mtk_pcie_pm_ops,
1219	},
1220};
1221module_platform_driver(mtk_pcie_driver);
1222MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MediaTek PCIe host controller driver.
   4 *
   5 * Copyright (c) 2017 MediaTek Inc.
   6 * Author: Ryder Lee <ryder.lee@mediatek.com>
   7 *	   Honghui Zhang <honghui.zhang@mediatek.com>
   8 */
   9
  10#include <linux/clk.h>
  11#include <linux/delay.h>
  12#include <linux/iopoll.h>
  13#include <linux/irq.h>
  14#include <linux/irqchip/chained_irq.h>
  15#include <linux/irqdomain.h>
  16#include <linux/kernel.h>
  17#include <linux/msi.h>
  18#include <linux/module.h>
  19#include <linux/of_address.h>
  20#include <linux/of_pci.h>
  21#include <linux/of_platform.h>
  22#include <linux/pci.h>
  23#include <linux/phy/phy.h>
  24#include <linux/platform_device.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/reset.h>
  27
  28#include "../pci.h"
  29
  30/* PCIe shared registers */
  31#define PCIE_SYS_CFG		0x00
  32#define PCIE_INT_ENABLE		0x0c
  33#define PCIE_CFG_ADDR		0x20
  34#define PCIE_CFG_DATA		0x24
  35
  36/* PCIe per port registers */
  37#define PCIE_BAR0_SETUP		0x10
  38#define PCIE_CLASS		0x34
  39#define PCIE_LINK_STATUS	0x50
  40
  41#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
  42#define PCIE_PORT_PERST(x)	BIT(1 + (x))
  43#define PCIE_PORT_LINKUP	BIT(0)
  44#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)
  45
  46#define PCIE_BAR_ENABLE		BIT(0)
  47#define PCIE_REVISION_ID	BIT(0)
  48#define PCIE_CLASS_CODE		(0x60400 << 8)
  49#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
  50				((((regn) >> 8) & GENMASK(3, 0)) << 24))
  51#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
  52#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
  53#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
  54#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
  55	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
  56	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
  57
  58/* MediaTek specific configuration registers */
  59#define PCIE_FTS_NUM		0x70c
  60#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
  61#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)
  62
  63#define PCIE_FC_CREDIT		0x73c
  64#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
  65#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)
  66
  67/* PCIe V2 share registers */
  68#define PCIE_SYS_CFG_V2		0x0
  69#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
  70#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)
  71
  72/* PCIe V2 per-port registers */
  73#define PCIE_MSI_VECTOR		0x0c0
  74
  75#define PCIE_CONF_VEND_ID	0x100
  76#define PCIE_CONF_DEVICE_ID	0x102
  77#define PCIE_CONF_CLASS_ID	0x106
  78
  79#define PCIE_INT_MASK		0x420
  80#define INTX_MASK		GENMASK(19, 16)
  81#define INTX_SHIFT		16
  82#define PCIE_INT_STATUS		0x424
  83#define MSI_STATUS		BIT(23)
  84#define PCIE_IMSI_STATUS	0x42c
  85#define PCIE_IMSI_ADDR		0x430
  86#define MSI_MASK		BIT(23)
  87#define MTK_MSI_IRQS_NUM	32
  88
  89#define PCIE_AHB_TRANS_BASE0_L	0x438
  90#define PCIE_AHB_TRANS_BASE0_H	0x43c
  91#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
  92#define PCIE_AXI_WINDOW0	0x448
  93#define WIN_ENABLE		BIT(7)
  94/*
  95 * Define PCIe to AHB window size as 2^33 to support max 8GB address space
  96 * translate, support least 4GB DRAM size access from EP DMA(physical DRAM
  97 * start from 0x40000000).
  98 */
  99#define PCIE2AHB_SIZE	0x21
 100
 101/* PCIe V2 configuration transaction header */
 102#define PCIE_CFG_HEADER0	0x460
 103#define PCIE_CFG_HEADER1	0x464
 104#define PCIE_CFG_HEADER2	0x468
 105#define PCIE_CFG_WDATA		0x470
 106#define PCIE_APP_TLP_REQ	0x488
 107#define PCIE_CFG_RDATA		0x48c
 108#define APP_CFG_REQ		BIT(0)
 109#define APP_CPL_STATUS		GENMASK(7, 5)
 110
 111#define CFG_WRRD_TYPE_0		4
 112#define CFG_WR_FMT		2
 113#define CFG_RD_FMT		0
 114
 115#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
 116#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
 117#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
 118#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
 119#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
 120#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
 121#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
 122#define CFG_HEADER_DW0(type, fmt) \
 123	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
 124#define CFG_HEADER_DW1(where, size) \
 125	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
 126#define CFG_HEADER_DW2(regn, fun, dev, bus) \
 127	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
 128	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
 129
 130#define PCIE_RST_CTRL		0x510
 131#define PCIE_PHY_RSTB		BIT(0)
 132#define PCIE_PIPE_SRSTB		BIT(1)
 133#define PCIE_MAC_SRSTB		BIT(2)
 134#define PCIE_CRSTB		BIT(3)
 135#define PCIE_PERSTB		BIT(8)
 136#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
 137#define PCIE_LINK_STATUS_V2	0x804
 138#define PCIE_PORT_LINKUP_V2	BIT(10)
 139
 140struct mtk_pcie_port;
 141
 142/**
 143 * struct mtk_pcie_soc - differentiate between host generations
 144 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
 145 * @need_fix_device_id: whether this host's device ID needed to be fixed or not
 146 * @no_msi: Bridge has no MSI support, and relies on an external block
 147 * @device_id: device ID which this host need to be fixed
 148 * @ops: pointer to configuration access functions
 149 * @startup: pointer to controller setting functions
 150 * @setup_irq: pointer to initialize IRQ functions
 151 */
 152struct mtk_pcie_soc {
 153	bool need_fix_class_id;
 154	bool need_fix_device_id;
 155	bool no_msi;
 156	unsigned int device_id;
 157	struct pci_ops *ops;
 158	int (*startup)(struct mtk_pcie_port *port);
 159	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
 160};
 161
 162/**
 163 * struct mtk_pcie_port - PCIe port information
 164 * @base: IO mapped register base
 165 * @list: port list
 166 * @pcie: pointer to PCIe host info
 167 * @reset: pointer to port reset control
 168 * @sys_ck: pointer to transaction/data link layer clock
 169 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 170 *          and RC initiated MMIO access
 171 * @axi_ck: pointer to application layer MMIO channel operating clock
 172 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 173 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 174 * @obff_ck: pointer to OBFF functional block operating clock
 175 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 176 * @phy: pointer to PHY control block
 177 * @slot: port slot
 178 * @irq: GIC irq
 179 * @irq_domain: legacy INTx IRQ domain
 180 * @inner_domain: inner IRQ domain
 181 * @msi_domain: MSI IRQ domain
 182 * @lock: protect the msi_irq_in_use bitmap
 183 * @msi_irq_in_use: bit map for assigned MSI IRQ
 184 */
 185struct mtk_pcie_port {
 186	void __iomem *base;
 187	struct list_head list;
 188	struct mtk_pcie *pcie;
 189	struct reset_control *reset;
 190	struct clk *sys_ck;
 191	struct clk *ahb_ck;
 192	struct clk *axi_ck;
 193	struct clk *aux_ck;
 194	struct clk *obff_ck;
 195	struct clk *pipe_ck;
 196	struct phy *phy;
 197	u32 slot;
 198	int irq;
 199	struct irq_domain *irq_domain;
 200	struct irq_domain *inner_domain;
 201	struct irq_domain *msi_domain;
 202	struct mutex lock;
 203	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
 204};
 205
 206/**
 207 * struct mtk_pcie - PCIe host information
 208 * @dev: pointer to PCIe device
 209 * @base: IO mapped register base
 210 * @free_ck: free-run reference clock
 211 * @mem: non-prefetchable memory resource
 212 * @ports: pointer to PCIe port information
 213 * @soc: pointer to SoC-dependent operations
 214 */
 215struct mtk_pcie {
 216	struct device *dev;
 217	void __iomem *base;
 218	struct clk *free_ck;
 219
 220	struct list_head ports;
 221	const struct mtk_pcie_soc *soc;
 222};
 223
 224static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
 225{
 226	struct device *dev = pcie->dev;
 227
 228	clk_disable_unprepare(pcie->free_ck);
 229
 230	pm_runtime_put_sync(dev);
 231	pm_runtime_disable(dev);
 232}
 233
 234static void mtk_pcie_port_free(struct mtk_pcie_port *port)
 235{
 236	struct mtk_pcie *pcie = port->pcie;
 237	struct device *dev = pcie->dev;
 238
 239	devm_iounmap(dev, port->base);
 240	list_del(&port->list);
 241	devm_kfree(dev, port);
 242}
 243
 244static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
 245{
 246	struct mtk_pcie_port *port, *tmp;
 247
 248	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
 249		phy_power_off(port->phy);
 250		phy_exit(port->phy);
 251		clk_disable_unprepare(port->pipe_ck);
 252		clk_disable_unprepare(port->obff_ck);
 253		clk_disable_unprepare(port->axi_ck);
 254		clk_disable_unprepare(port->aux_ck);
 255		clk_disable_unprepare(port->ahb_ck);
 256		clk_disable_unprepare(port->sys_ck);
 257		mtk_pcie_port_free(port);
 258	}
 259
 260	mtk_pcie_subsys_powerdown(pcie);
 261}
 262
 263static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
 264{
 265	u32 val;
 266	int err;
 267
 268	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
 269					!(val & APP_CFG_REQ), 10,
 270					100 * USEC_PER_MSEC);
 271	if (err)
 272		return PCIBIOS_SET_FAILED;
 273
 274	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
 275		return PCIBIOS_SET_FAILED;
 276
 277	return PCIBIOS_SUCCESSFUL;
 278}
 279
 280static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
 281			      int where, int size, u32 *val)
 282{
 283	u32 tmp;
 284
 285	/* Write PCIe configuration transaction header for Cfgrd */
 286	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
 287	       port->base + PCIE_CFG_HEADER0);
 288	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
 289	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
 290	       port->base + PCIE_CFG_HEADER2);
 291
 292	/* Trigger h/w to transmit Cfgrd TLP */
 293	tmp = readl(port->base + PCIE_APP_TLP_REQ);
 294	tmp |= APP_CFG_REQ;
 295	writel(tmp, port->base + PCIE_APP_TLP_REQ);
 296
 297	/* Check completion status */
 298	if (mtk_pcie_check_cfg_cpld(port))
 299		return PCIBIOS_SET_FAILED;
 300
 301	/* Read cpld payload of Cfgrd */
 302	*val = readl(port->base + PCIE_CFG_RDATA);
 303
 304	if (size == 1)
 305		*val = (*val >> (8 * (where & 3))) & 0xff;
 306	else if (size == 2)
 307		*val = (*val >> (8 * (where & 3))) & 0xffff;
 308
 309	return PCIBIOS_SUCCESSFUL;
 310}
 311
 312static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
 313			      int where, int size, u32 val)
 314{
 315	/* Write PCIe configuration transaction header for Cfgwr */
 316	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
 317	       port->base + PCIE_CFG_HEADER0);
 318	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
 319	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
 320	       port->base + PCIE_CFG_HEADER2);
 321
 322	/* Write Cfgwr data */
 323	val = val << 8 * (where & 3);
 324	writel(val, port->base + PCIE_CFG_WDATA);
 325
 326	/* Trigger h/w to transmit Cfgwr TLP */
 327	val = readl(port->base + PCIE_APP_TLP_REQ);
 328	val |= APP_CFG_REQ;
 329	writel(val, port->base + PCIE_APP_TLP_REQ);
 330
 331	/* Check completion status */
 332	return mtk_pcie_check_cfg_cpld(port);
 333}
 334
 335static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
 336						unsigned int devfn)
 337{
 338	struct mtk_pcie *pcie = bus->sysdata;
 339	struct mtk_pcie_port *port;
 340	struct pci_dev *dev = NULL;
 341
 342	/*
 343	 * Walk the bus hierarchy to get the devfn value
 344	 * of the port in the root bus.
 345	 */
 346	while (bus && bus->number) {
 347		dev = bus->self;
 348		bus = dev->bus;
 349		devfn = dev->devfn;
 350	}
 351
 352	list_for_each_entry(port, &pcie->ports, list)
 353		if (port->slot == PCI_SLOT(devfn))
 354			return port;
 355
 356	return NULL;
 357}
 358
 359static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 360				int where, int size, u32 *val)
 361{
 362	struct mtk_pcie_port *port;
 363	u32 bn = bus->number;
 364	int ret;
 365
 366	port = mtk_pcie_find_port(bus, devfn);
 367	if (!port) {
 368		*val = ~0;
 369		return PCIBIOS_DEVICE_NOT_FOUND;
 370	}
 371
 372	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
 373	if (ret)
 374		*val = ~0;
 375
 376	return ret;
 377}
 378
 379static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 380				 int where, int size, u32 val)
 381{
 382	struct mtk_pcie_port *port;
 383	u32 bn = bus->number;
 384
 385	port = mtk_pcie_find_port(bus, devfn);
 386	if (!port)
 387		return PCIBIOS_DEVICE_NOT_FOUND;
 388
 389	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
 390}
 391
 392static struct pci_ops mtk_pcie_ops_v2 = {
 393	.read  = mtk_pcie_config_read,
 394	.write = mtk_pcie_config_write,
 395};
 396
 397static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 398{
 399	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 400	phys_addr_t addr;
 401
 402	/* MT2712/MT7622 only support 32-bit MSI addresses */
 403	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
 404	msg->address_hi = 0;
 405	msg->address_lo = lower_32_bits(addr);
 406
 407	msg->data = data->hwirq;
 408
 409	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
 410		(int)data->hwirq, msg->address_hi, msg->address_lo);
 411}
 412
 413static int mtk_msi_set_affinity(struct irq_data *irq_data,
 414				const struct cpumask *mask, bool force)
 415{
 416	 return -EINVAL;
 417}
 418
 419static void mtk_msi_ack_irq(struct irq_data *data)
 420{
 421	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 422	u32 hwirq = data->hwirq;
 423
 424	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
 425}
 426
 427static struct irq_chip mtk_msi_bottom_irq_chip = {
 428	.name			= "MTK MSI",
 429	.irq_compose_msi_msg	= mtk_compose_msi_msg,
 430	.irq_set_affinity	= mtk_msi_set_affinity,
 431	.irq_ack		= mtk_msi_ack_irq,
 432};
 433
 434static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 435				     unsigned int nr_irqs, void *args)
 436{
 437	struct mtk_pcie_port *port = domain->host_data;
 438	unsigned long bit;
 439
 440	WARN_ON(nr_irqs != 1);
 441	mutex_lock(&port->lock);
 442
 443	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
 444	if (bit >= MTK_MSI_IRQS_NUM) {
 445		mutex_unlock(&port->lock);
 446		return -ENOSPC;
 447	}
 448
 449	__set_bit(bit, port->msi_irq_in_use);
 450
 451	mutex_unlock(&port->lock);
 452
 453	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
 454			    domain->host_data, handle_edge_irq,
 455			    NULL, NULL);
 456
 457	return 0;
 458}
 459
 460static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
 461				     unsigned int virq, unsigned int nr_irqs)
 462{
 463	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 464	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
 465
 466	mutex_lock(&port->lock);
 467
 468	if (!test_bit(d->hwirq, port->msi_irq_in_use))
 469		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
 470			d->hwirq);
 471	else
 472		__clear_bit(d->hwirq, port->msi_irq_in_use);
 473
 474	mutex_unlock(&port->lock);
 475
 476	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 477}
 478
 479static const struct irq_domain_ops msi_domain_ops = {
 480	.alloc	= mtk_pcie_irq_domain_alloc,
 481	.free	= mtk_pcie_irq_domain_free,
 482};
 483
 484static struct irq_chip mtk_msi_irq_chip = {
 485	.name		= "MTK PCIe MSI",
 486	.irq_ack	= irq_chip_ack_parent,
 487	.irq_mask	= pci_msi_mask_irq,
 488	.irq_unmask	= pci_msi_unmask_irq,
 489};
 490
 491static struct msi_domain_info mtk_msi_domain_info = {
 492	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 493		   MSI_FLAG_PCI_MSIX),
 494	.chip	= &mtk_msi_irq_chip,
 495};
 496
 497static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
 498{
 499	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
 500
 501	mutex_init(&port->lock);
 502
 503	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
 504						      &msi_domain_ops, port);
 505	if (!port->inner_domain) {
 506		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
 507		return -ENOMEM;
 508	}
 509
 510	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
 511						     port->inner_domain);
 512	if (!port->msi_domain) {
 513		dev_err(port->pcie->dev, "failed to create MSI domain\n");
 514		irq_domain_remove(port->inner_domain);
 515		return -ENOMEM;
 516	}
 517
 518	return 0;
 519}
 520
 521static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
 522{
 523	u32 val;
 524	phys_addr_t msg_addr;
 525
 526	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
 527	val = lower_32_bits(msg_addr);
 528	writel(val, port->base + PCIE_IMSI_ADDR);
 529
 530	val = readl(port->base + PCIE_INT_MASK);
 531	val &= ~MSI_MASK;
 532	writel(val, port->base + PCIE_INT_MASK);
 533}
 534
 535static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
 536{
 537	struct mtk_pcie_port *port, *tmp;
 538
 539	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
 540		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
 541
 542		if (port->irq_domain)
 543			irq_domain_remove(port->irq_domain);
 544
 545		if (IS_ENABLED(CONFIG_PCI_MSI)) {
 546			if (port->msi_domain)
 547				irq_domain_remove(port->msi_domain);
 548			if (port->inner_domain)
 549				irq_domain_remove(port->inner_domain);
 550		}
 551
 552		irq_dispose_mapping(port->irq);
 553	}
 554}
 555
 556static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 557			     irq_hw_number_t hwirq)
 558{
 559	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 560	irq_set_chip_data(irq, domain->host_data);
 561
 562	return 0;
 563}
 564
 565static const struct irq_domain_ops intx_domain_ops = {
 566	.map = mtk_pcie_intx_map,
 567};
 568
 569static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
 570				    struct device_node *node)
 571{
 572	struct device *dev = port->pcie->dev;
 573	struct device_node *pcie_intc_node;
 574	int ret;
 575
 576	/* Setup INTx */
 577	pcie_intc_node = of_get_next_child(node, NULL);
 578	if (!pcie_intc_node) {
 579		dev_err(dev, "no PCIe Intc node found\n");
 580		return -ENODEV;
 581	}
 582
 583	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 584						 &intx_domain_ops, port);
 585	of_node_put(pcie_intc_node);
 586	if (!port->irq_domain) {
 587		dev_err(dev, "failed to get INTx IRQ domain\n");
 588		return -ENODEV;
 589	}
 590
 591	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 592		ret = mtk_pcie_allocate_msi_domains(port);
 593		if (ret)
 594			return ret;
 595	}
 596
 597	return 0;
 598}
 599
 600static void mtk_pcie_intr_handler(struct irq_desc *desc)
 601{
 602	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
 603	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 604	unsigned long status;
 605	u32 virq;
 606	u32 bit = INTX_SHIFT;
 607
 608	chained_irq_enter(irqchip, desc);
 609
 610	status = readl(port->base + PCIE_INT_STATUS);
 611	if (status & INTX_MASK) {
 612		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
 613			/* Clear the INTx */
 614			writel(1 << bit, port->base + PCIE_INT_STATUS);
 615			virq = irq_find_mapping(port->irq_domain,
 616						bit - INTX_SHIFT);
 617			generic_handle_irq(virq);
 618		}
 619	}
 620
 621	if (IS_ENABLED(CONFIG_PCI_MSI)) {
  622		if (status & MSI_STATUS) {
 623			unsigned long imsi_status;
 624
 625			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
 626				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
 627					virq = irq_find_mapping(port->inner_domain, bit);
 628					generic_handle_irq(virq);
 629				}
 630			}
 631			/* Clear MSI interrupt status */
 632			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
 633		}
 634	}
 635
 636	chained_irq_exit(irqchip, desc);
 637}
 638
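/*
 * Per-port interrupt setup: create the IRQ domains and install
 * mtk_pcie_intr_handler() as the chained handler for the port interrupt
 * obtained from the platform device.
 */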
 639static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
 640			      struct device_node *node)
 641{
 642	struct mtk_pcie *pcie = port->pcie;
 643	struct device *dev = pcie->dev;
 644	struct platform_device *pdev = to_platform_device(dev);
 645	int err;
 646
 647	err = mtk_pcie_init_irq_domain(port, node);
 648	if (err) {
 649		dev_err(dev, "failed to init PCIe IRQ domain\n");
 650		return err;
 651	}
 652
 653	port->irq = platform_get_irq(pdev, port->slot);
 654	if (port->irq < 0)
 655		return port->irq;
 656
 657	irq_set_chained_handler_and_data(port->irq,
 658					 mtk_pcie_intr_handler, port);
 659
 660	return 0;
 661}
 662
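/*
 * Bring up a port on the V2 (MT2712/MT7622/MT7629 class) hardware: enable
 * LTSSM/ASPM in the shared registers when present, cycle the port resets,
 * fix up vendor/class/device IDs where the SoC requires it, wait for link
 * up and program the AHB<->PCIe translation windows.
 */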
 663static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
 664{
 665	struct mtk_pcie *pcie = port->pcie;
 666	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 667	struct resource *mem = NULL;
 668	struct resource_entry *entry;
 669	const struct mtk_pcie_soc *soc = port->pcie->soc;
 670	u32 val;
 671	int err;
 672
 673	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
 674	if (entry)
 675		mem = entry->res;
 676	if (!mem)
 677		return -EINVAL;
 678
 679	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
 680	if (pcie->base) {
 681		val = readl(pcie->base + PCIE_SYS_CFG_V2);
 682		val |= PCIE_CSR_LTSSM_EN(port->slot) |
 683		       PCIE_CSR_ASPM_L1_EN(port->slot);
 684		writel(val, pcie->base + PCIE_SYS_CFG_V2);
 685	}
 686
 687	/* Assert all reset signals */
 688	writel(0, port->base + PCIE_RST_CTRL);
 689
  690	/*
  691	 * Enable PCIe link down reset: if the link status changes from link
  692	 * up to link down, the MAC control registers and the configuration
  693	 * space are reset.
  694	 */
 695	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
 696
  697	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
 698	val = readl(port->base + PCIE_RST_CTRL);
 699	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
 700	       PCIE_MAC_SRSTB | PCIE_CRSTB;
 701	writel(val, port->base + PCIE_RST_CTRL);
 702
 703	/* Set up vendor ID and class code */
 704	if (soc->need_fix_class_id) {
 705		val = PCI_VENDOR_ID_MEDIATEK;
 706		writew(val, port->base + PCIE_CONF_VEND_ID);
 707
 708		val = PCI_CLASS_BRIDGE_PCI;
 709		writew(val, port->base + PCIE_CONF_CLASS_ID);
 710	}
 711
 712	if (soc->need_fix_device_id)
 713		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
 714
 715	/* 100ms timeout value should be enough for Gen1/2 training */
 716	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
 717				 !!(val & PCIE_PORT_LINKUP_V2), 20,
 718				 100 * USEC_PER_MSEC);
 719	if (err)
 720		return -ETIMEDOUT;
 721
 722	/* Set INTx mask */
 723	val = readl(port->base + PCIE_INT_MASK);
 724	val &= ~INTX_MASK;
 725	writel(val, port->base + PCIE_INT_MASK);
 726
 727	if (IS_ENABLED(CONFIG_PCI_MSI))
 728		mtk_pcie_enable_msi(port);
 729
 730	/* Set AHB to PCIe translation windows */
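	/*
	 * Illustrative sizing note: fls(resource_size(mem)) is what ends up
	 * in the low five bits of the window register; e.g. for an assumed
	 * 256 MB window, resource_size() is 0x10000000 and fls() returns 29.
	 */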
 731	val = lower_32_bits(mem->start) |
 732	      AHB2PCIE_SIZE(fls(resource_size(mem)));
 733	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
 734
 735	val = upper_32_bits(mem->start);
 736	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
 737
  738	/* Set PCIe to AXI translation memory space. */
 739	val = PCIE2AHB_SIZE | WIN_ENABLE;
 740	writel(val, port->base + PCIE_AXI_WINDOW0);
 741
 742	return 0;
 743}
 744
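/*
 * V1 configuration access: program PCIE_CFG_ADDR with the target
 * bus/device/function/register and return the matching offset into the
 * PCIE_CFG_DATA window for the generic config accessors.
 */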
 745static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
 746				      unsigned int devfn, int where)
 747{
 748	struct mtk_pcie *pcie = bus->sysdata;
 749
 750	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
 751			      bus->number), pcie->base + PCIE_CFG_ADDR);
 752
 753	return pcie->base + PCIE_CFG_DATA + (where & 3);
 754}
 755
 756static struct pci_ops mtk_pcie_ops = {
 757	.map_bus = mtk_pcie_map_bus,
 758	.read  = pci_generic_config_read,
 759	.write = pci_generic_config_write,
 760};
 761
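/*
 * Bring up a port on the V1 (MT2701/MT7623 class) hardware: toggle PERST#,
 * wait for link up, enable the port interrupt, set up the BAR0 mapping and
 * class code, and tune the FC credits and FTS number through the
 * CFG_ADDR/CFG_DATA window.
 */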
 762static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
 763{
 764	struct mtk_pcie *pcie = port->pcie;
  765	u32 func = PCI_FUNC(port->slot << 3);
 766	u32 slot = PCI_SLOT(port->slot << 3);
 767	u32 val;
 768	int err;
 769
 770	/* assert port PERST_N */
 771	val = readl(pcie->base + PCIE_SYS_CFG);
 772	val |= PCIE_PORT_PERST(port->slot);
 773	writel(val, pcie->base + PCIE_SYS_CFG);
 774
 775	/* de-assert port PERST_N */
 776	val = readl(pcie->base + PCIE_SYS_CFG);
 777	val &= ~PCIE_PORT_PERST(port->slot);
 778	writel(val, pcie->base + PCIE_SYS_CFG);
 779
 780	/* 100ms timeout value should be enough for Gen1/2 training */
 781	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
 782				 !!(val & PCIE_PORT_LINKUP), 20,
 783				 100 * USEC_PER_MSEC);
 784	if (err)
 785		return -ETIMEDOUT;
 786
 787	/* enable interrupt */
 788	val = readl(pcie->base + PCIE_INT_ENABLE);
 789	val |= PCIE_PORT_INT_EN(port->slot);
 790	writel(val, pcie->base + PCIE_INT_ENABLE);
 791
  792	/* Map to the entire DDR region; must be set before config operations. */
 793	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
 794	       port->base + PCIE_BAR0_SETUP);
 795
 796	/* configure class code and revision ID */
 797	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
 798
 799	/* configure FC credit */
 800	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
 801	       pcie->base + PCIE_CFG_ADDR);
 802	val = readl(pcie->base + PCIE_CFG_DATA);
 803	val &= ~PCIE_FC_CREDIT_MASK;
 804	val |= PCIE_FC_CREDIT_VAL(0x806c);
 805	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
 806	       pcie->base + PCIE_CFG_ADDR);
 807	writel(val, pcie->base + PCIE_CFG_DATA);
 808
 809	/* configure RC FTS number to 250 when it leaves L0s */
 810	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
 811	       pcie->base + PCIE_CFG_ADDR);
 812	val = readl(pcie->base + PCIE_CFG_DATA);
 813	val &= ~PCIE_FTS_NUM_MASK;
 814	val |= PCIE_FTS_NUM_L0(0x50);
 815	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
 816	       pcie->base + PCIE_CFG_ADDR);
 817	writel(val, pcie->base + PCIE_CFG_DATA);
 818
 819	return 0;
 820}
 821
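/*
 * Enable the per-port clocks, reset and PHY, then run the SoC specific
 * startup hook; ports that fail to come up are unwound and released via
 * mtk_pcie_port_free().
 */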
 822static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
 823{
 824	struct mtk_pcie *pcie = port->pcie;
 825	struct device *dev = pcie->dev;
 826	int err;
 827
 828	err = clk_prepare_enable(port->sys_ck);
 829	if (err) {
 830		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
 831		goto err_sys_clk;
 832	}
 833
 834	err = clk_prepare_enable(port->ahb_ck);
 835	if (err) {
 836		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
 837		goto err_ahb_clk;
 838	}
 839
 840	err = clk_prepare_enable(port->aux_ck);
 841	if (err) {
 842		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
 843		goto err_aux_clk;
 844	}
 845
 846	err = clk_prepare_enable(port->axi_ck);
 847	if (err) {
 848		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
 849		goto err_axi_clk;
 850	}
 851
 852	err = clk_prepare_enable(port->obff_ck);
 853	if (err) {
 854		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
 855		goto err_obff_clk;
 856	}
 857
 858	err = clk_prepare_enable(port->pipe_ck);
 859	if (err) {
 860		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
 861		goto err_pipe_clk;
 862	}
 863
 864	reset_control_assert(port->reset);
 865	reset_control_deassert(port->reset);
 866
 867	err = phy_init(port->phy);
 868	if (err) {
 869		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
 870		goto err_phy_init;
 871	}
 872
 873	err = phy_power_on(port->phy);
 874	if (err) {
 875		dev_err(dev, "failed to power on port%d phy\n", port->slot);
 876		goto err_phy_on;
 877	}
 878
 879	if (!pcie->soc->startup(port))
 880		return;
 881
 882	dev_info(dev, "Port%d link down\n", port->slot);
 883
 884	phy_power_off(port->phy);
 885err_phy_on:
 886	phy_exit(port->phy);
 887err_phy_init:
 888	clk_disable_unprepare(port->pipe_ck);
 889err_pipe_clk:
 890	clk_disable_unprepare(port->obff_ck);
 891err_obff_clk:
 892	clk_disable_unprepare(port->axi_ck);
 893err_axi_clk:
 894	clk_disable_unprepare(port->aux_ck);
 895err_aux_clk:
 896	clk_disable_unprepare(port->ahb_ck);
 897err_ahb_clk:
 898	clk_disable_unprepare(port->sys_ck);
 899err_sys_clk:
 900	mtk_pcie_port_free(port);
 901}
 902
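/*
 * Parse one root port child node: map the per-port registers, look up the
 * (mostly optional) clocks, reset control and PHY, run the SoC interrupt
 * setup hook if there is one and add the port to the host's list.
 */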
 903static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
 904			       struct device_node *node,
 905			       int slot)
 906{
 907	struct mtk_pcie_port *port;
 908	struct device *dev = pcie->dev;
 909	struct platform_device *pdev = to_platform_device(dev);
 910	char name[10];
 911	int err;
 912
 913	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
 914	if (!port)
 915		return -ENOMEM;
 916
 917	snprintf(name, sizeof(name), "port%d", slot);
 918	port->base = devm_platform_ioremap_resource_byname(pdev, name);
 919	if (IS_ERR(port->base)) {
 920		dev_err(dev, "failed to map port%d base\n", slot);
 921		return PTR_ERR(port->base);
 922	}
 923
 924	snprintf(name, sizeof(name), "sys_ck%d", slot);
 925	port->sys_ck = devm_clk_get(dev, name);
 926	if (IS_ERR(port->sys_ck)) {
 927		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
 928		return PTR_ERR(port->sys_ck);
 929	}
 930
 931	/* sys_ck might be divided into the following parts in some chips */
 932	snprintf(name, sizeof(name), "ahb_ck%d", slot);
 933	port->ahb_ck = devm_clk_get_optional(dev, name);
 934	if (IS_ERR(port->ahb_ck))
 935		return PTR_ERR(port->ahb_ck);
 936
 937	snprintf(name, sizeof(name), "axi_ck%d", slot);
 938	port->axi_ck = devm_clk_get_optional(dev, name);
 939	if (IS_ERR(port->axi_ck))
 940		return PTR_ERR(port->axi_ck);
 941
 942	snprintf(name, sizeof(name), "aux_ck%d", slot);
 943	port->aux_ck = devm_clk_get_optional(dev, name);
 944	if (IS_ERR(port->aux_ck))
 945		return PTR_ERR(port->aux_ck);
 946
 947	snprintf(name, sizeof(name), "obff_ck%d", slot);
 948	port->obff_ck = devm_clk_get_optional(dev, name);
 949	if (IS_ERR(port->obff_ck))
 950		return PTR_ERR(port->obff_ck);
 951
 952	snprintf(name, sizeof(name), "pipe_ck%d", slot);
 953	port->pipe_ck = devm_clk_get_optional(dev, name);
 954	if (IS_ERR(port->pipe_ck))
 955		return PTR_ERR(port->pipe_ck);
 956
 957	snprintf(name, sizeof(name), "pcie-rst%d", slot);
 958	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
 959	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
 960		return PTR_ERR(port->reset);
 961
 962	/* some platforms may use default PHY setting */
 963	snprintf(name, sizeof(name), "pcie-phy%d", slot);
 964	port->phy = devm_phy_optional_get(dev, name);
 965	if (IS_ERR(port->phy))
 966		return PTR_ERR(port->phy);
 967
 968	port->slot = slot;
 969	port->pcie = pcie;
 970
 971	if (pcie->soc->setup_irq) {
 972		err = pcie->soc->setup_irq(port, node);
 973		if (err)
 974			return err;
 975	}
 976
 977	INIT_LIST_HEAD(&port->list);
 978	list_add_tail(&port->list, &pcie->ports);
 979
 980	return 0;
 981}
 982
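/*
 * Map the optional shared "subsys" registers, look up the optional free_ck
 * clock and power up the PCIe subsystem through runtime PM before enabling
 * the top level clock.
 */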
 983static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
 984{
 985	struct device *dev = pcie->dev;
 986	struct platform_device *pdev = to_platform_device(dev);
 987	struct resource *regs;
 988	int err;
 989
 990	/* get shared registers, which are optional */
 991	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
 992	if (regs) {
 993		pcie->base = devm_ioremap_resource(dev, regs);
  994		if (IS_ERR(pcie->base))
  995			return PTR_ERR(pcie->base);
  996	}
 997
 998	pcie->free_ck = devm_clk_get(dev, "free_ck");
 999	if (IS_ERR(pcie->free_ck)) {
1000		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
1001			return -EPROBE_DEFER;
1002
1003		pcie->free_ck = NULL;
1004	}
1005
1006	pm_runtime_enable(dev);
1007	pm_runtime_get_sync(dev);
1008
1009	/* enable top level clock */
1010	err = clk_prepare_enable(pcie->free_ck);
1011	if (err) {
1012		dev_err(dev, "failed to enable free_ck\n");
1013		goto err_free_ck;
1014	}
1015
1016	return 0;
1017
1018err_free_ck:
1019	pm_runtime_put_sync(dev);
1020	pm_runtime_disable(dev);
1021
1022	return err;
1023}
1024
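/*
 * Walk the available root port child nodes, parse each one into a port,
 * power up the subsystem, enable every port and power the subsystem back
 * down again if no port ends up with a working link.
 */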
1025static int mtk_pcie_setup(struct mtk_pcie *pcie)
1026{
1027	struct device *dev = pcie->dev;
1028	struct device_node *node = dev->of_node, *child;
1029	struct mtk_pcie_port *port, *tmp;
1030	int err;
1031
1032	for_each_available_child_of_node(node, child) {
1033		int slot;
1034
1035		err = of_pci_get_devfn(child);
1036		if (err < 0) {
1037			dev_err(dev, "failed to parse devfn: %d\n", err);
1038			goto error_put_node;
1039		}
1040
1041		slot = PCI_SLOT(err);
1042
1043		err = mtk_pcie_parse_port(pcie, child, slot);
1044		if (err)
1045			goto error_put_node;
1046	}
1047
1048	err = mtk_pcie_subsys_powerup(pcie);
1049	if (err)
1050		return err;
1051
1052	/* enable each port, and then check link status */
1053	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1054		mtk_pcie_enable_port(port);
1055
1056	/* power down PCIe subsys if slots are all empty (link down) */
1057	if (list_empty(&pcie->ports))
1058		mtk_pcie_subsys_powerdown(pcie);
1059
1060	return 0;
1061error_put_node:
1062	of_node_put(child);
1063	return err;
1064}
1065
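/*
 * Allocate the host bridge, parse and start the ports, then hand the
 * bridge over to the PCI core via pci_host_probe().
 */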
1066static int mtk_pcie_probe(struct platform_device *pdev)
1067{
1068	struct device *dev = &pdev->dev;
1069	struct mtk_pcie *pcie;
1070	struct pci_host_bridge *host;
1071	int err;
1072
1073	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
1074	if (!host)
1075		return -ENOMEM;
1076
1077	pcie = pci_host_bridge_priv(host);
1078
1079	pcie->dev = dev;
1080	pcie->soc = of_device_get_match_data(dev);
1081	platform_set_drvdata(pdev, pcie);
1082	INIT_LIST_HEAD(&pcie->ports);
1083
1084	err = mtk_pcie_setup(pcie);
1085	if (err)
1086		return err;
1087
1088	host->ops = pcie->soc->ops;
1089	host->sysdata = pcie;
1090	host->msi_domain = pcie->soc->no_msi;
1091
1092	err = pci_host_probe(host);
1093	if (err)
1094		goto put_resources;
1095
1096	return 0;
1097
1098put_resources:
1099	if (!list_empty(&pcie->ports))
1100		mtk_pcie_put_resources(pcie);
1101
1102	return err;
1103}
1104
1105
1106static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
1107{
1108	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1109	struct list_head *windows = &host->windows;
1110
1111	pci_free_resource_list(windows);
1112}
1113
1114static int mtk_pcie_remove(struct platform_device *pdev)
1115{
1116	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
1117	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1118
1119	pci_stop_root_bus(host->bus);
1120	pci_remove_root_bus(host->bus);
1121	mtk_pcie_free_resources(pcie);
1122
1123	mtk_pcie_irq_teardown(pcie);
1124
1125	mtk_pcie_put_resources(pcie);
1126
1127	return 0;
1128}
1129
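/*
 * System sleep support: disable the per-port clocks and PHYs on suspend,
 * then re-enable free_ck and rerun mtk_pcie_enable_port() on resume to
 * bring the links back up.
 */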
1130static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
1131{
1132	struct mtk_pcie *pcie = dev_get_drvdata(dev);
1133	struct mtk_pcie_port *port;
1134
1135	if (list_empty(&pcie->ports))
1136		return 0;
1137
1138	list_for_each_entry(port, &pcie->ports, list) {
1139		clk_disable_unprepare(port->pipe_ck);
1140		clk_disable_unprepare(port->obff_ck);
1141		clk_disable_unprepare(port->axi_ck);
1142		clk_disable_unprepare(port->aux_ck);
1143		clk_disable_unprepare(port->ahb_ck);
1144		clk_disable_unprepare(port->sys_ck);
1145		phy_power_off(port->phy);
1146		phy_exit(port->phy);
1147	}
1148
1149	clk_disable_unprepare(pcie->free_ck);
1150
1151	return 0;
1152}
1153
1154static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
1155{
1156	struct mtk_pcie *pcie = dev_get_drvdata(dev);
1157	struct mtk_pcie_port *port, *tmp;
1158
1159	if (list_empty(&pcie->ports))
1160		return 0;
1161
1162	clk_prepare_enable(pcie->free_ck);
1163
1164	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1165		mtk_pcie_enable_port(port);
1166
 1167	/* In case the EP was removed during system suspend. */
1168	if (list_empty(&pcie->ports))
1169		clk_disable_unprepare(pcie->free_ck);
1170
1171	return 0;
1172}
1173
1174static const struct dev_pm_ops mtk_pcie_pm_ops = {
1175	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
1176				      mtk_pcie_resume_noirq)
1177};
1178
1179static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
1180	.no_msi = true,
1181	.ops = &mtk_pcie_ops,
1182	.startup = mtk_pcie_startup_port,
1183};
1184
1185static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
1186	.ops = &mtk_pcie_ops_v2,
1187	.startup = mtk_pcie_startup_port_v2,
1188	.setup_irq = mtk_pcie_setup_irq,
1189};
1190
1191static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
1192	.need_fix_class_id = true,
1193	.ops = &mtk_pcie_ops_v2,
1194	.startup = mtk_pcie_startup_port_v2,
1195	.setup_irq = mtk_pcie_setup_irq,
1196};
1197
1198static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
1199	.need_fix_class_id = true,
1200	.need_fix_device_id = true,
1201	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
1202	.ops = &mtk_pcie_ops_v2,
1203	.startup = mtk_pcie_startup_port_v2,
1204	.setup_irq = mtk_pcie_setup_irq,
1205};
1206
1207static const struct of_device_id mtk_pcie_ids[] = {
1208	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
1209	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
1210	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
1211	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
1212	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
1213	{},
1214};
1215MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
1216
1217static struct platform_driver mtk_pcie_driver = {
1218	.probe = mtk_pcie_probe,
1219	.remove = mtk_pcie_remove,
1220	.driver = {
1221		.name = "mtk-pcie",
1222		.of_match_table = mtk_pcie_ids,
1223		.suppress_bind_attrs = true,
1224		.pm = &mtk_pcie_pm_ops,
1225	},
1226};
1227module_platform_driver(mtk_pcie_driver);
1228MODULE_LICENSE("GPL v2");