v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MediaTek PCIe host controller driver.
   4 *
   5 * Copyright (c) 2020 MediaTek Inc.
   6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/delay.h>
  11#include <linux/iopoll.h>
  12#include <linux/irq.h>
  13#include <linux/irqchip/chained_irq.h>
  14#include <linux/irqdomain.h>
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/msi.h>
  18#include <linux/pci.h>
  19#include <linux/phy/phy.h>
  20#include <linux/platform_device.h>
  21#include <linux/pm_domain.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/reset.h>
  24
  25#include "../pci.h"
  26
  27#define PCIE_SETTING_REG		0x80
  28#define PCIE_PCI_IDS_1			0x9c
  29#define PCI_CLASS(class)		(class << 8)
  30#define PCIE_RC_MODE			BIT(0)
  31
  32#define PCIE_CFGNUM_REG			0x140
  33#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
  34#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
  35#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
  36#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
  37#define PCIE_CFG_OFFSET_ADDR		0x1000
  38#define PCIE_CFG_HEADER(bus, devfn) \
  39	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
  40
  41#define PCIE_RST_CTRL_REG		0x148
  42#define PCIE_MAC_RSTB			BIT(0)
  43#define PCIE_PHY_RSTB			BIT(1)
  44#define PCIE_BRG_RSTB			BIT(2)
  45#define PCIE_PE_RSTB			BIT(3)
  46
  47#define PCIE_LTSSM_STATUS_REG		0x150
  48#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
  49#define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
  50#define PCIE_LTSSM_STATE_L2_IDLE	0x14
  51
  52#define PCIE_LINK_STATUS_REG		0x154
  53#define PCIE_PORT_LINKUP		BIT(8)
  54
  55#define PCIE_MSI_SET_NUM		8
  56#define PCIE_MSI_IRQS_PER_SET		32
  57#define PCIE_MSI_IRQS_NUM \
  58	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
  59
  60#define PCIE_INT_ENABLE_REG		0x180
  61#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
  62#define PCIE_MSI_SHIFT			8
  63#define PCIE_INTX_SHIFT			24
  64#define PCIE_INTX_ENABLE \
  65	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
  66
  67#define PCIE_INT_STATUS_REG		0x184
  68#define PCIE_MSI_SET_ENABLE_REG		0x190
  69#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)
  70
  71#define PCIE_MSI_SET_BASE_REG		0xc00
  72#define PCIE_MSI_SET_OFFSET		0x10
  73#define PCIE_MSI_SET_STATUS_OFFSET	0x04
  74#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
  75
  76#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
  77#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04
  78
  79#define PCIE_ICMD_PM_REG		0x198
  80#define PCIE_TURN_OFF_LINK		BIT(4)
  81
  82#define PCIE_MISC_CTRL_REG		0x348
  83#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)
  84
  85#define PCIE_TRANS_TABLE_BASE_REG	0x800
  86#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
  87#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
  88#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
  89#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
  90#define PCIE_ATR_TLB_SET_OFFSET		0x20
  91
  92#define PCIE_MAX_TRANS_TABLES		8
  93#define PCIE_ATR_EN			BIT(0)
  94#define PCIE_ATR_SIZE(size) \
  95	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
  96#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
  97#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
  98#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
  99#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
 100#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
 101#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
 102
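A worked example of the address-translation encoding above (an annotation, not part of the upstream file; the values follow from the macros as written):

/*
 * PCIE_ATR_SIZE() takes log2 of the window size in bytes; the driver
 * passes fls(table_size) - 1. For an enabled 4 KiB (0x1000) window:
 *
 *	fls(0x1000) - 1 = 12
 *	PCIE_ATR_SIZE(12) = (((12 - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN
 *			  = 0x16 | 0x1 = 0x17
 *
 * An enabled 1 MiB (0x100000) window encodes as 0x27 in the same way.
 */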
 103/**
 104 * struct mtk_msi_set - MSI information for each set
 105 * @base: IO mapped register base
 106 * @msg_addr: MSI message address
 107 * @saved_irq_state: IRQ enable state saved at suspend time
 108 */
 109struct mtk_msi_set {
 110	void __iomem *base;
 111	phys_addr_t msg_addr;
 112	u32 saved_irq_state;
 113};
 114
 115/**
 116 * struct mtk_gen3_pcie - PCIe port information
 117 * @dev: pointer to PCIe device
 118 * @base: IO mapped register base
 119 * @reg_base: physical register base
 120 * @mac_reset: MAC reset control
 121 * @phy_reset: PHY reset control
 122 * @phy: PHY controller block
 123 * @clks: PCIe clocks
 124 * @num_clks: PCIe clocks count for this port
 125 * @irq: PCIe controller interrupt number
 126 * @saved_irq_state: IRQ enable state saved at suspend time
 127 * @irq_lock: lock protecting IRQ register access
 128 * @intx_domain: legacy INTx IRQ domain
 129 * @msi_domain: MSI IRQ domain
 130 * @msi_bottom_domain: MSI IRQ bottom domain
 131 * @msi_sets: MSI sets information
 132 * @lock: lock protecting IRQ bit map
 133 * @msi_irq_in_use: bit map for assigned MSI IRQ
 134 */
 135struct mtk_gen3_pcie {
 136	struct device *dev;
 137	void __iomem *base;
 138	phys_addr_t reg_base;
 139	struct reset_control *mac_reset;
 140	struct reset_control *phy_reset;
 141	struct phy *phy;
 142	struct clk_bulk_data *clks;
 143	int num_clks;
 144
 145	int irq;
 146	u32 saved_irq_state;
 147	raw_spinlock_t irq_lock;
 148	struct irq_domain *intx_domain;
 149	struct irq_domain *msi_domain;
 150	struct irq_domain *msi_bottom_domain;
 151	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
 152	struct mutex lock;
 153	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
 154};
 155
 156/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
 157static const char *const ltssm_str[] = {
 158	"detect.quiet",			/* 0x00 */
 159	"detect.active",		/* 0x01 */
 160	"polling.active",		/* 0x02 */
 161	"polling.compliance",		/* 0x03 */
 162	"polling.configuration",	/* 0x04 */
 163	"config.linkwidthstart",	/* 0x05 */
 164	"config.linkwidthaccept",	/* 0x06 */
 165	"config.lanenumwait",		/* 0x07 */
 166	"config.lanenumaccept",		/* 0x08 */
 167	"config.complete",		/* 0x09 */
 168	"config.idle",			/* 0x0A */
 169	"recovery.receiverlock",	/* 0x0B */
 170	"recovery.equalization",	/* 0x0C */
 171	"recovery.speed",		/* 0x0D */
 172	"recovery.receiverconfig",	/* 0x0E */
 173	"recovery.idle",		/* 0x0F */
 174	"L0",				/* 0x10 */
 175	"L0s",				/* 0x11 */
 176	"L1.entry",			/* 0x12 */
 177	"L1.idle",			/* 0x13 */
 178	"L2.idle",			/* 0x14 */
 179	"L2.transmitwake",		/* 0x15 */
 180	"disable",			/* 0x16 */
 181	"loopback.entry",		/* 0x17 */
 182	"loopback.active",		/* 0x18 */
 183	"loopback.exit",		/* 0x19 */
 184	"hotreset",			/* 0x1A */
 185};
 186
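How the table above is indexed (an annotation, not part of the upstream file; it mirrors the bounds-checked lookup in mtk_pcie_startup_port() below):

/*
 * For a raw PCIE_LTSSM_STATUS_REG value val = 0x0d000000:
 *
 *	PCIE_LTSSM_STATE(val) = (val & GENMASK(28, 24)) >> 24 = 0x0d
 *	ltssm_str[0x0d] = "recovery.speed"
 *
 * Indexes beyond 0x1a fall outside ltssm_str[] and must be reported as
 * an unknown state, as the link-up error path below does.
 */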
 187/**
 188 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 189 * @bus: PCI bus to query
 190 * @devfn: device/function number
 191 * @where: offset in config space
 192 * @size: data size in TLP header
 193 *
 194 * Set byte enable field and device information in configuration TLP header.
 195 */
 196static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
 197					int where, int size)
 198{
 199	struct mtk_gen3_pcie *pcie = bus->sysdata;
 200	int bytes;
 201	u32 val;
 202
 203	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
 204
 205	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
 206	      PCIE_CFG_HEADER(bus->number, devfn);
 207
 208	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
 209}
 210
 211static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 212				      int where)
 213{
 214	struct mtk_gen3_pcie *pcie = bus->sysdata;
 215
 216	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
 217}
 218
 219static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 220				int where, int size, u32 *val)
 221{
 222	mtk_pcie_config_tlp_header(bus, devfn, where, size);
 223
 224	return pci_generic_config_read32(bus, devfn, where, size, val);
 225}
 226
 227static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 228				 int where, int size, u32 val)
 229{
 230	mtk_pcie_config_tlp_header(bus, devfn, where, size);
 231
 232	if (size <= 2)
 233		val <<= (where & 0x3) * 8;
 234
 235	return pci_generic_config_write32(bus, devfn, where, 4, val);
 236}
 237
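A worked example of the config-access emulation above (an annotation, not part of the upstream file): a 16-bit write of 0x1234 at offset 0x12 becomes one aligned 32-bit write with per-byte lane enables.

/*
 * In mtk_pcie_config_tlp_header(), size = 2, where = 0x12:
 *
 *	bytes = (GENMASK(1, 0) & 0xf) << (0x12 & 0x3) = 0x3 << 2 = 0xc
 *
 * so byte lanes 2-3 are enabled via PCIE_CFG_BYTE_EN() in PCIE_CFGNUM_REG.
 * In mtk_pcie_config_write(), size <= 2, so the data is shifted into
 * position first:
 *
 *	val <<= (0x12 & 0x3) * 8	(0x1234 becomes 0x12340000)
 *
 * and pci_generic_config_write32() then issues a single aligned 32-bit
 * write in which only the enabled lanes take effect.
 */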
 238static struct pci_ops mtk_pcie_ops = {
 239	.map_bus = mtk_pcie_map_bus,
 240	.read  = mtk_pcie_config_read,
 241	.write = mtk_pcie_config_write,
 242};
 243
 244static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
 245				    resource_size_t cpu_addr,
 246				    resource_size_t pci_addr,
 247				    resource_size_t size,
 248				    unsigned long type, int *num)
 249{
 250	resource_size_t remaining = size;
 251	resource_size_t table_size;
 252	resource_size_t addr_align;
 253	const char *range_type;
 254	void __iomem *table;
 255	u32 val;
 256
 257	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
 258		/* Table size needs to be a power of 2 */
 259		table_size = BIT(fls(remaining) - 1);
 260
 261		if (cpu_addr > 0) {
 262			addr_align = BIT(ffs(cpu_addr) - 1);
 263			table_size = min(table_size, addr_align);
 264		}
 265
 266		/* Minimum size of translate table is 4KiB */
 267		if (table_size < 0x1000) {
 268			dev_err(pcie->dev, "illegal table size %#llx\n",
 269				(unsigned long long)table_size);
 270			return -EINVAL;
 271		}
 272
 273		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
 274		writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
 275		writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
 276		writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
 277		writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
 278
 279		if (type == IORESOURCE_IO) {
 280			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
 281			range_type = "IO";
 282		} else {
 283			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
 284			range_type = "MEM";
 285		}
 286
 287		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
 288
 289		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
 290			range_type, *num, (unsigned long long)cpu_addr,
 291			(unsigned long long)pci_addr, (unsigned long long)table_size);
 292
 293		cpu_addr += table_size;
 294		pci_addr += table_size;
 295		remaining -= table_size;
 296		(*num)++;
 297	}
 298
 299	if (remaining)
 300		dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
 301			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
 302
 303	return 0;
 304}
 305
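A worked example of the window-splitting loop above (an annotation, not part of the upstream file; the addresses are made up for illustration):

/*
 * For cpu_addr = 0x20000000, size = 0x600000 (6 MiB):
 *
 *	pass 1: BIT(fls(0x600000) - 1) = 0x400000; the cpu_addr alignment
 *		(0x20000000) does not limit it, so table 0 maps 4 MiB.
 *	pass 2: remaining = 0x200000; BIT(fls(0x200000) - 1) = 0x200000,
 *		and cpu_addr = 0x20400000 is 0x400000-aligned, so table 1
 *		maps the final 2 MiB.
 *
 * Two of the PCIE_MAX_TRANS_TABLES (8) entries are consumed; anything
 * still remaining after eight tables only triggers the dev_warn() above.
 */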
 306static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
 307{
 308	int i;
 309	u32 val;
 310
 311	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 312		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
 313
 314		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
 315				i * PCIE_MSI_SET_OFFSET;
 316		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
 317				    i * PCIE_MSI_SET_OFFSET;
 318
 319		/* Configure the MSI capture address */
 320		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
 321		writel_relaxed(upper_32_bits(msi_set->msg_addr),
 322			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
 323			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
 324	}
 325
 326	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
 327	val |= PCIE_MSI_SET_ENABLE;
 328	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);
 329
 330	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
 331	val |= PCIE_MSI_ENABLE;
 332	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
 333}
 334
 335static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
 336{
 337	struct resource_entry *entry;
 338	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 339	unsigned int table_index = 0;
 340	int err;
 341	u32 val;
 342
 343	/* Set as RC mode */
 344	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
 345	val |= PCIE_RC_MODE;
 346	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
 347
 348	/* Set class code */
 349	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
 350	val &= ~GENMASK(31, 8);
 351	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
 352	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
 353
 354	/* Mask all INTx interrupts */
 355	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
 356	val &= ~PCIE_INTX_ENABLE;
 357	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
 358
 359	/* Disable DVFSRC voltage request */
 360	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
 361	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
 362	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
 363
 364	/* Assert all reset signals */
 365	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
 366	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
 367	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
 368
 369	/*
 370	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
 371	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
 372	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
 373	 * for the power and clock to become stable.
 374	 */
 375	msleep(100);
 376
 377	/* De-assert reset signals */
 378	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
 379	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
 380
 381	/* Check if the link is up or not */
 382	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
 383				 !!(val & PCIE_PORT_LINKUP), 20,
 384				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
 385	if (err) {
 386		const char *ltssm_state;
 387		int ltssm_index;
 388
 389		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
 390		ltssm_index = PCIE_LTSSM_STATE(val);
 391		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
 392			      "Unknown state" : ltssm_str[ltssm_index];
 393		dev_err(pcie->dev,
 394			"PCIe link down, current LTSSM state: %s (%#x)\n",
 395			ltssm_state, val);
 396		return err;
 397	}
 398
 399	mtk_pcie_enable_msi(pcie);
 400
 401	/* Set PCIe translation windows */
 402	resource_list_for_each_entry(entry, &host->windows) {
 403		struct resource *res = entry->res;
 404		unsigned long type = resource_type(res);
 405		resource_size_t cpu_addr;
 406		resource_size_t pci_addr;
 407		resource_size_t size;
 408
 409		if (type == IORESOURCE_IO)
 410			cpu_addr = pci_pio_to_address(res->start);
 411		else if (type == IORESOURCE_MEM)
 412			cpu_addr = res->start;
 413		else
 414			continue;
 415
 416		pci_addr = res->start - entry->offset;
 417		size = resource_size(res);
 418		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
 419					       type, &table_index);
 420		if (err)
 421			return err;
 422	}
 423
 424	return 0;
 425}
 426
 427static int mtk_pcie_set_affinity(struct irq_data *data,
 428				 const struct cpumask *mask, bool force)
 429{
 430	return -EINVAL;
 431}
 432
 433static void mtk_pcie_msi_irq_mask(struct irq_data *data)
 434{
 435	pci_msi_mask_irq(data);
 436	irq_chip_mask_parent(data);
 437}
 438
 439static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
 440{
 441	pci_msi_unmask_irq(data);
 442	irq_chip_unmask_parent(data);
 443}
 444
 445static struct irq_chip mtk_msi_irq_chip = {
 446	.irq_ack = irq_chip_ack_parent,
 447	.irq_mask = mtk_pcie_msi_irq_mask,
 448	.irq_unmask = mtk_pcie_msi_irq_unmask,
 449	.name = "MSI",
 450};
 451
 452static struct msi_domain_info mtk_msi_domain_info = {
 453	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 454		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
 455	.chip	= &mtk_msi_irq_chip,
 456};
 457
 458static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 459{
 460	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 461	struct mtk_gen3_pcie *pcie = data->domain->host_data;
 462	unsigned long hwirq;
 463
 464	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 465
 466	msg->address_hi = upper_32_bits(msi_set->msg_addr);
 467	msg->address_lo = lower_32_bits(msi_set->msg_addr);
 468	msg->data = hwirq;
 469	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
 470		hwirq, msg->address_hi, msg->address_lo, msg->data);
 471}
 472
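How a bottom-domain hwirq maps onto the eight MSI sets (an annotation, not part of the upstream file; hwirq 70 is an arbitrary example):

/*
 * For hwirq = 70:
 *
 *	set_idx = 70 / PCIE_MSI_IRQS_PER_SET = 2
 *	bit     = 70 % PCIE_MSI_IRQS_PER_SET = 6
 *
 * The endpoint is programmed to write data 6 to set 2's capture address
 * (reg_base + PCIE_MSI_SET_BASE_REG + 2 * PCIE_MSI_SET_OFFSET), and the
 * demultiplexer below then sees bit 6 in that set's status register.
 */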
 473static void mtk_msi_bottom_irq_ack(struct irq_data *data)
 474{
 475	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 476	unsigned long hwirq;
 477
 478	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 479
 480	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
 481}
 482
 483static void mtk_msi_bottom_irq_mask(struct irq_data *data)
 484{
 485	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 486	struct mtk_gen3_pcie *pcie = data->domain->host_data;
 487	unsigned long hwirq, flags;
 488	u32 val;
 489
 490	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 491
 492	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
 493	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 494	val &= ~BIT(hwirq);
 495	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 496	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
 497}
 498
 499static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
 500{
 501	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 502	struct mtk_gen3_pcie *pcie = data->domain->host_data;
 503	unsigned long hwirq, flags;
 504	u32 val;
 505
 506	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 507
 508	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
 509	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 510	val |= BIT(hwirq);
 511	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 512	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
 513}
 514
 515static struct irq_chip mtk_msi_bottom_irq_chip = {
 516	.irq_ack		= mtk_msi_bottom_irq_ack,
 517	.irq_mask		= mtk_msi_bottom_irq_mask,
 518	.irq_unmask		= mtk_msi_bottom_irq_unmask,
 519	.irq_compose_msi_msg	= mtk_compose_msi_msg,
 520	.irq_set_affinity	= mtk_pcie_set_affinity,
 521	.name			= "MSI",
 522};
 523
 524static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
 525				       unsigned int virq, unsigned int nr_irqs,
 526				       void *arg)
 527{
 528	struct mtk_gen3_pcie *pcie = domain->host_data;
 529	struct mtk_msi_set *msi_set;
 530	int i, hwirq, set_idx;
 531
 532	mutex_lock(&pcie->lock);
 533
 534	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
 535					order_base_2(nr_irqs));
 536
 537	mutex_unlock(&pcie->lock);
 538
 539	if (hwirq < 0)
 540		return -ENOSPC;
 541
 542	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
 543	msi_set = &pcie->msi_sets[set_idx];
 544
 545	for (i = 0; i < nr_irqs; i++)
 546		irq_domain_set_info(domain, virq + i, hwirq + i,
 547				    &mtk_msi_bottom_irq_chip, msi_set,
 548				    handle_edge_irq, NULL, NULL);
 549
 550	return 0;
 551}
 552
 553static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
 554				       unsigned int virq, unsigned int nr_irqs)
 555{
 556	struct mtk_gen3_pcie *pcie = domain->host_data;
 557	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
 558
 559	mutex_lock(&pcie->lock);
 560
 561	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
 562			      order_base_2(nr_irqs));
 563
 564	mutex_unlock(&pcie->lock);
 565
 566	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 567}
 568
 569static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
 570	.alloc = mtk_msi_bottom_domain_alloc,
 571	.free = mtk_msi_bottom_domain_free,
 572};
 573
 574static void mtk_intx_mask(struct irq_data *data)
 575{
 576	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
 577	unsigned long flags;
 578	u32 val;
 579
 580	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
 581	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
 582	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
 583	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
 584	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
 585}
 586
 587static void mtk_intx_unmask(struct irq_data *data)
 588{
 589	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
 590	unsigned long flags;
 591	u32 val;
 592
 593	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
 594	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
 595	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
 596	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
 597	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
 598}
 599
 600/**
 601 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 602 * @data: pointer to chip specific data
 603 *
 604 * As an emulated level IRQ, its interrupt status will remain
  605 * until the corresponding de-assert message is received; hence
 606 * the status can only be cleared when the interrupt has been serviced.
 607 */
 608static void mtk_intx_eoi(struct irq_data *data)
 609{
 610	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
 611	unsigned long hwirq;
 612
 613	hwirq = data->hwirq + PCIE_INTX_SHIFT;
 614	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
 615}
 616
 617static struct irq_chip mtk_intx_irq_chip = {
 618	.irq_mask		= mtk_intx_mask,
 619	.irq_unmask		= mtk_intx_unmask,
 620	.irq_eoi		= mtk_intx_eoi,
 621	.irq_set_affinity	= mtk_pcie_set_affinity,
 622	.name			= "INTx",
 623};
 624
 625static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 626			     irq_hw_number_t hwirq)
 627{
 628	irq_set_chip_data(irq, domain->host_data);
 629	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
 630				      handle_fasteoi_irq, "INTx");
 631	return 0;
 632}
 633
 634static const struct irq_domain_ops intx_domain_ops = {
 635	.map = mtk_pcie_intx_map,
 636};
 637
 638static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
 639{
 640	struct device *dev = pcie->dev;
 641	struct device_node *intc_node, *node = dev->of_node;
 642	int ret;
 643
 644	raw_spin_lock_init(&pcie->irq_lock);
 645
 646	/* Setup INTx */
 647	intc_node = of_get_child_by_name(node, "interrupt-controller");
 648	if (!intc_node) {
 649		dev_err(dev, "missing interrupt-controller node\n");
 650		return -ENODEV;
 651	}
 652
 653	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
 654						  &intx_domain_ops, pcie);
 655	if (!pcie->intx_domain) {
 656		dev_err(dev, "failed to create INTx IRQ domain\n");
 657		ret = -ENODEV;
 658		goto out_put_node;
 659	}
 660
 661	/* Setup MSI */
 662	mutex_init(&pcie->lock);
 663
 664	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
 665				  &mtk_msi_bottom_domain_ops, pcie);
 666	if (!pcie->msi_bottom_domain) {
 667		dev_err(dev, "failed to create MSI bottom domain\n");
 668		ret = -ENODEV;
 669		goto err_msi_bottom_domain;
 670	}
 671
 672	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
 673						     &mtk_msi_domain_info,
 674						     pcie->msi_bottom_domain);
 675	if (!pcie->msi_domain) {
 676		dev_err(dev, "failed to create MSI domain\n");
 677		ret = -ENODEV;
 678		goto err_msi_domain;
 679	}
 680
 681	of_node_put(intc_node);
 682	return 0;
 683
 684err_msi_domain:
 685	irq_domain_remove(pcie->msi_bottom_domain);
 686err_msi_bottom_domain:
 687	irq_domain_remove(pcie->intx_domain);
 688out_put_node:
 689	of_node_put(intc_node);
 690	return ret;
 691}
 692
 693static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
 694{
 695	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
 696
 697	if (pcie->intx_domain)
 698		irq_domain_remove(pcie->intx_domain);
 699
 700	if (pcie->msi_domain)
 701		irq_domain_remove(pcie->msi_domain);
 702
 703	if (pcie->msi_bottom_domain)
 704		irq_domain_remove(pcie->msi_bottom_domain);
 705
 706	irq_dispose_mapping(pcie->irq);
 707}
 708
 709static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
 710{
 711	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
 712	unsigned long msi_enable, msi_status;
 713	irq_hw_number_t bit, hwirq;
 714
 715	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 716
 717	do {
 718		msi_status = readl_relaxed(msi_set->base +
 719					   PCIE_MSI_SET_STATUS_OFFSET);
 720		msi_status &= msi_enable;
 721		if (!msi_status)
 722			break;
 723
 724		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
 725			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
 726			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
 727		}
 728	} while (true);
 729}
 730
 731static void mtk_pcie_irq_handler(struct irq_desc *desc)
 732{
 733	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
 734	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 735	unsigned long status;
 736	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
 737
 738	chained_irq_enter(irqchip, desc);
 739
 740	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
 741	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
 742			      PCIE_INTX_SHIFT)
 743		generic_handle_domain_irq(pcie->intx_domain,
 744					  irq_bit - PCIE_INTX_SHIFT);
 745
 746	irq_bit = PCIE_MSI_SHIFT;
 747	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
 748			      PCIE_MSI_SHIFT) {
 749		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);
 750
 751		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
 752	}
 753
 754	chained_irq_exit(irqchip, desc);
 755}
 756
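The layout of PCIE_INT_STATUS_REG as demultiplexed above, with a sample value (an annotation, not part of the upstream file):

/*
 * bits 24-27: INTA-INTD (PCIE_INTX_SHIFT = 24, PCI_NUM_INTX = 4)
 * bits  8-15: MSI sets 0-7 (PCIE_MSI_SHIFT = 8, PCIE_MSI_SET_NUM = 8)
 *
 * For status = 0x01000400, bit 24 dispatches INTx hwirq 0 (INTA) and
 * bit 10 calls mtk_pcie_msi_handler(pcie, 2). Only the MSI set bit is
 * written back here; INTx status is cleared later in mtk_intx_eoi().
 */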
 757static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
 758{
 759	struct device *dev = pcie->dev;
 760	struct platform_device *pdev = to_platform_device(dev);
 761	int err;
 762
 763	err = mtk_pcie_init_irq_domains(pcie);
 764	if (err)
 765		return err;
 766
 767	pcie->irq = platform_get_irq(pdev, 0);
 768	if (pcie->irq < 0)
 769		return pcie->irq;
 770
 771	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
 772
 773	return 0;
 774}
 775
 776static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
 777{
 778	struct device *dev = pcie->dev;
 779	struct platform_device *pdev = to_platform_device(dev);
 780	struct resource *regs;
 781	int ret;
 782
 783	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
 784	if (!regs)
 785		return -EINVAL;
 786	pcie->base = devm_ioremap_resource(dev, regs);
 787	if (IS_ERR(pcie->base)) {
 788		dev_err(dev, "failed to map register base\n");
 789		return PTR_ERR(pcie->base);
 790	}
 791
 792	pcie->reg_base = regs->start;
 793
 794	pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
 795	if (IS_ERR(pcie->phy_reset)) {
 796		ret = PTR_ERR(pcie->phy_reset);
 797		if (ret != -EPROBE_DEFER)
 798			dev_err(dev, "failed to get PHY reset\n");
 799
 800		return ret;
 801	}
 802
 803	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
 804	if (IS_ERR(pcie->mac_reset)) {
 805		ret = PTR_ERR(pcie->mac_reset);
 806		if (ret != -EPROBE_DEFER)
 807			dev_err(dev, "failed to get MAC reset\n");
 808
 809		return ret;
 810	}
 811
 812	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
 813	if (IS_ERR(pcie->phy)) {
 814		ret = PTR_ERR(pcie->phy);
 815		if (ret != -EPROBE_DEFER)
 816			dev_err(dev, "failed to get PHY\n");
 817
 818		return ret;
 819	}
 820
 821	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
 822	if (pcie->num_clks < 0) {
 823		dev_err(dev, "failed to get clocks\n");
 824		return pcie->num_clks;
 825	}
 826
 827	return 0;
 828}
 829
 830static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
 831{
 832	struct device *dev = pcie->dev;
 833	int err;
 834
 835	/* PHY power on and enable pipe clock */
 836	reset_control_deassert(pcie->phy_reset);
 837
 838	err = phy_init(pcie->phy);
 839	if (err) {
 840		dev_err(dev, "failed to initialize PHY\n");
 841		goto err_phy_init;
 842	}
 843
 844	err = phy_power_on(pcie->phy);
 845	if (err) {
 846		dev_err(dev, "failed to power on PHY\n");
 847		goto err_phy_on;
 848	}
 849
 850	/* MAC power on and enable transaction layer clocks */
 851	reset_control_deassert(pcie->mac_reset);
 852
 853	pm_runtime_enable(dev);
 854	pm_runtime_get_sync(dev);
 855
 856	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
 857	if (err) {
 858		dev_err(dev, "failed to enable clocks\n");
 859		goto err_clk_init;
 860	}
 861
 862	return 0;
 863
 864err_clk_init:
 865	pm_runtime_put_sync(dev);
 866	pm_runtime_disable(dev);
 867	reset_control_assert(pcie->mac_reset);
 868	phy_power_off(pcie->phy);
 869err_phy_on:
 870	phy_exit(pcie->phy);
 871err_phy_init:
 872	reset_control_assert(pcie->phy_reset);
 873
 874	return err;
 875}
 876
 877static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
 878{
 879	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
 880
 881	pm_runtime_put_sync(pcie->dev);
 882	pm_runtime_disable(pcie->dev);
 883	reset_control_assert(pcie->mac_reset);
 884
 885	phy_power_off(pcie->phy);
 886	phy_exit(pcie->phy);
 887	reset_control_assert(pcie->phy_reset);
 888}
 889
 890static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
 891{
 892	int err;
 893
 894	err = mtk_pcie_parse_port(pcie);
 895	if (err)
 896		return err;
 897
 898	/*
 899	 * The controller may have been left out of reset by the bootloader
 900	 * so make sure that we get a clean start by asserting resets here.
 901	 */
 902	reset_control_assert(pcie->phy_reset);
 903	reset_control_assert(pcie->mac_reset);
 904	usleep_range(10, 20);
 905
 906	/* Don't touch the hardware registers before power up */
 907	err = mtk_pcie_power_up(pcie);
 908	if (err)
 909		return err;
 910
 911	/* Try link up */
 912	err = mtk_pcie_startup_port(pcie);
 913	if (err)
 914		goto err_setup;
 915
 916	err = mtk_pcie_setup_irq(pcie);
 917	if (err)
 918		goto err_setup;
 919
 920	return 0;
 921
 922err_setup:
 923	mtk_pcie_power_down(pcie);
 924
 925	return err;
 926}
 927
 928static int mtk_pcie_probe(struct platform_device *pdev)
 929{
 930	struct device *dev = &pdev->dev;
 931	struct mtk_gen3_pcie *pcie;
 932	struct pci_host_bridge *host;
 933	int err;
 934
 935	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 936	if (!host)
 937		return -ENOMEM;
 938
 939	pcie = pci_host_bridge_priv(host);
 940
 941	pcie->dev = dev;
 942	platform_set_drvdata(pdev, pcie);
 943
 944	err = mtk_pcie_setup(pcie);
 945	if (err)
 946		return err;
 947
 948	host->ops = &mtk_pcie_ops;
 949	host->sysdata = pcie;
 950
 951	err = pci_host_probe(host);
 952	if (err) {
 953		mtk_pcie_irq_teardown(pcie);
 954		mtk_pcie_power_down(pcie);
 955		return err;
 956	}
 957
 958	return 0;
 959}
 960
 961static void mtk_pcie_remove(struct platform_device *pdev)
 962{
 963	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
 964	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 965
 966	pci_lock_rescan_remove();
 967	pci_stop_root_bus(host->bus);
 968	pci_remove_root_bus(host->bus);
 969	pci_unlock_rescan_remove();
 970
 971	mtk_pcie_irq_teardown(pcie);
 972	mtk_pcie_power_down(pcie);
 973}
 974
 975static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
 976{
 977	int i;
 978
 979	raw_spin_lock(&pcie->irq_lock);
 980
 981	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
 982
 983	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 984		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
 985
 986		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
 987					   PCIE_MSI_SET_ENABLE_OFFSET);
 988	}
 989
 990	raw_spin_unlock(&pcie->irq_lock);
 991}
 992
 993static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
 994{
 995	int i;
 996
 997	raw_spin_lock(&pcie->irq_lock);
 998
 999	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
1000
1001	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
1002		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
1003
1004		writel_relaxed(msi_set->saved_irq_state,
1005			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
1006	}
1007
1008	raw_spin_unlock(&pcie->irq_lock);
1009}
1010
1011static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
1012{
1013	u32 val;
1014
1015	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
1016	val |= PCIE_TURN_OFF_LINK;
1017	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
1018
1019	/* Check the link is L2 */
1020	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
1021				  (PCIE_LTSSM_STATE(val) ==
1022				   PCIE_LTSSM_STATE_L2_IDLE), 20,
1023				   50 * USEC_PER_MSEC);
1024}
1025
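For reference (an annotation, not part of the upstream file), the poll above succeeds once the LTSSM reaches L2:

/*
 * readl_poll_timeout() samples PCIE_LTSSM_STATUS_REG every 20 us for at
 * most 50 ms and returns 0 once PCIE_LTSSM_STATE(val) == 0x14, i.e.
 * "L2.idle" in ltssm_str[]; on timeout it returns -ETIMEDOUT, which
 * mtk_pcie_suspend_noirq() reports as a failure to enter L2.
 */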
1026static int mtk_pcie_suspend_noirq(struct device *dev)
1027{
1028	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
1029	int err;
1030	u32 val;
1031
1032	/* Trigger link to L2 state */
1033	err = mtk_pcie_turn_off_link(pcie);
1034	if (err) {
1035		dev_err(pcie->dev, "cannot enter L2 state\n");
1036		return err;
1037	}
1038
1039	/* Pull down the PERST# pin */
1040	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
1041	val |= PCIE_PE_RSTB;
1042	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
1043
1044	dev_dbg(pcie->dev, "entered L2 states successfully");
1045
1046	mtk_pcie_irq_save(pcie);
1047	mtk_pcie_power_down(pcie);
1048
1049	return 0;
1050}
1051
1052static int mtk_pcie_resume_noirq(struct device *dev)
1053{
1054	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
1055	int err;
1056
1057	err = mtk_pcie_power_up(pcie);
1058	if (err)
1059		return err;
1060
1061	err = mtk_pcie_startup_port(pcie);
1062	if (err) {
1063		mtk_pcie_power_down(pcie);
1064		return err;
1065	}
1066
1067	mtk_pcie_irq_restore(pcie);
1068
1069	return 0;
1070}
1071
1072static const struct dev_pm_ops mtk_pcie_pm_ops = {
1073	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
1074				  mtk_pcie_resume_noirq)
1075};
1076
1077static const struct of_device_id mtk_pcie_of_match[] = {
1078	{ .compatible = "mediatek,mt8192-pcie" },
1079	{},
1080};
1081MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1082
1083static struct platform_driver mtk_pcie_driver = {
1084	.probe = mtk_pcie_probe,
1085	.remove_new = mtk_pcie_remove,
1086	.driver = {
1087		.name = "mtk-pcie-gen3",
1088		.of_match_table = mtk_pcie_of_match,
1089		.pm = &mtk_pcie_pm_ops,
1090	},
1091};
1092
1093module_platform_driver(mtk_pcie_driver);
1094MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MediaTek PCIe host controller driver.
   4 *
   5 * Copyright (c) 2020 MediaTek Inc.
   6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/delay.h>
  11#include <linux/iopoll.h>
  12#include <linux/irq.h>
  13#include <linux/irqchip/chained_irq.h>
  14#include <linux/irqdomain.h>
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/msi.h>
  18#include <linux/pci.h>
  19#include <linux/phy/phy.h>
  20#include <linux/platform_device.h>
  21#include <linux/pm_domain.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/reset.h>
  24
  25#include "../pci.h"
  26
  27#define PCIE_SETTING_REG		0x80
  28#define PCIE_PCI_IDS_1			0x9c
  29#define PCI_CLASS(class)		(class << 8)
  30#define PCIE_RC_MODE			BIT(0)
  31
  32#define PCIE_CFGNUM_REG			0x140
  33#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
  34#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
  35#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
  36#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
  37#define PCIE_CFG_OFFSET_ADDR		0x1000
  38#define PCIE_CFG_HEADER(bus, devfn) \
  39	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
  40
  41#define PCIE_RST_CTRL_REG		0x148
  42#define PCIE_MAC_RSTB			BIT(0)
  43#define PCIE_PHY_RSTB			BIT(1)
  44#define PCIE_BRG_RSTB			BIT(2)
  45#define PCIE_PE_RSTB			BIT(3)
  46
  47#define PCIE_LTSSM_STATUS_REG		0x150
  48#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
  49#define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
  50#define PCIE_LTSSM_STATE_L2_IDLE	0x14
  51
  52#define PCIE_LINK_STATUS_REG		0x154
  53#define PCIE_PORT_LINKUP		BIT(8)
  54
  55#define PCIE_MSI_SET_NUM		8
  56#define PCIE_MSI_IRQS_PER_SET		32
  57#define PCIE_MSI_IRQS_NUM \
  58	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
  59
  60#define PCIE_INT_ENABLE_REG		0x180
  61#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
  62#define PCIE_MSI_SHIFT			8
  63#define PCIE_INTX_SHIFT			24
  64#define PCIE_INTX_ENABLE \
  65	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
  66
  67#define PCIE_INT_STATUS_REG		0x184
  68#define PCIE_MSI_SET_ENABLE_REG		0x190
  69#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)
  70
  71#define PCIE_MSI_SET_BASE_REG		0xc00
  72#define PCIE_MSI_SET_OFFSET		0x10
  73#define PCIE_MSI_SET_STATUS_OFFSET	0x04
  74#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
  75
  76#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
  77#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04
  78
  79#define PCIE_ICMD_PM_REG		0x198
  80#define PCIE_TURN_OFF_LINK		BIT(4)
  81
  82#define PCIE_TRANS_TABLE_BASE_REG	0x800
  83#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
  84#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
  85#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
  86#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
  87#define PCIE_ATR_TLB_SET_OFFSET		0x20
  88
  89#define PCIE_MAX_TRANS_TABLES		8
  90#define PCIE_ATR_EN			BIT(0)
  91#define PCIE_ATR_SIZE(size) \
  92	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
  93#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
  94#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
  95#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
  96#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
  97#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
  98#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
  99
 100/**
 101 * struct mtk_msi_set - MSI information for each set
 102 * @base: IO mapped register base
 103 * @msg_addr: MSI message address
 104 * @saved_irq_state: IRQ enable state saved at suspend time
 105 */
 106struct mtk_msi_set {
 107	void __iomem *base;
 108	phys_addr_t msg_addr;
 109	u32 saved_irq_state;
 110};
 111
 112/**
 113 * struct mtk_pcie_port - PCIe port information
 114 * @dev: pointer to PCIe device
 115 * @base: IO mapped register base
 116 * @reg_base: physical register base
 117 * @mac_reset: MAC reset control
 118 * @phy_reset: PHY reset control
 119 * @phy: PHY controller block
 120 * @clks: PCIe clocks
 121 * @num_clks: PCIe clocks count for this port
 122 * @irq: PCIe controller interrupt number
 123 * @saved_irq_state: IRQ enable state saved at suspend time
 124 * @irq_lock: lock protecting IRQ register access
 125 * @intx_domain: legacy INTx IRQ domain
 126 * @msi_domain: MSI IRQ domain
 127 * @msi_bottom_domain: MSI IRQ bottom domain
 128 * @msi_sets: MSI sets information
 129 * @lock: lock protecting IRQ bit map
 130 * @msi_irq_in_use: bit map for assigned MSI IRQ
 131 */
 132struct mtk_pcie_port {
 133	struct device *dev;
 134	void __iomem *base;
 135	phys_addr_t reg_base;
 136	struct reset_control *mac_reset;
 137	struct reset_control *phy_reset;
 138	struct phy *phy;
 139	struct clk_bulk_data *clks;
 140	int num_clks;
 141
 142	int irq;
 143	u32 saved_irq_state;
 144	raw_spinlock_t irq_lock;
 145	struct irq_domain *intx_domain;
 146	struct irq_domain *msi_domain;
 147	struct irq_domain *msi_bottom_domain;
 148	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
 149	struct mutex lock;
 150	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
 151};
 152
 153/**
 154 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 155 * @bus: PCI bus to query
 156 * @devfn: device/function number
 157 * @where: offset in config space
 158 * @size: data size in TLP header
 159 *
 160 * Set byte enable field and device information in configuration TLP header.
 161 */
 162static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
 163					int where, int size)
 164{
 165	struct mtk_pcie_port *port = bus->sysdata;
 166	int bytes;
 167	u32 val;
 168
 169	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
 170
 171	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
 172	      PCIE_CFG_HEADER(bus->number, devfn);
 173
 174	writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
 175}
 176
 177static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 178				      int where)
 179{
 180	struct mtk_pcie_port *port = bus->sysdata;
 181
 182	return port->base + PCIE_CFG_OFFSET_ADDR + where;
 183}
 184
 185static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 186				int where, int size, u32 *val)
 187{
 188	mtk_pcie_config_tlp_header(bus, devfn, where, size);
 189
 190	return pci_generic_config_read32(bus, devfn, where, size, val);
 191}
 192
 193static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 194				 int where, int size, u32 val)
 195{
 196	mtk_pcie_config_tlp_header(bus, devfn, where, size);
 197
 198	if (size <= 2)
 199		val <<= (where & 0x3) * 8;
 200
 201	return pci_generic_config_write32(bus, devfn, where, 4, val);
 202}
 203
 204static struct pci_ops mtk_pcie_ops = {
 205	.map_bus = mtk_pcie_map_bus,
 206	.read  = mtk_pcie_config_read,
 207	.write = mtk_pcie_config_write,
 208};
 209
 210static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
 211				    resource_size_t cpu_addr,
 212				    resource_size_t pci_addr,
 213				    resource_size_t size,
 214				    unsigned long type, int num)
 215{
 216	void __iomem *table;
 217	u32 val;
 218
 219	if (num >= PCIE_MAX_TRANS_TABLES) {
 220		dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
 221			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
 222		return -ENODEV;
 223	}
 224
 225	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
 226		num * PCIE_ATR_TLB_SET_OFFSET;
 227
 228	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
 229		       table);
 230	writel_relaxed(upper_32_bits(cpu_addr),
 231		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
 232	writel_relaxed(lower_32_bits(pci_addr),
 233		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
 234	writel_relaxed(upper_32_bits(pci_addr),
 235		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
 236
 237	if (type == IORESOURCE_IO)
 238		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
 239	else
 240		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
 241
 242	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
 243
 244	return 0;
 245}
 246
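Unlike the v6.8 loop earlier, this version programs exactly one table per bridge window; a note on its size encoding (an annotation and a reading of this older code, not part of the upstream file):

/*
 * PCIE_ATR_SIZE(fls(size) - 1) encodes the largest power of two not
 * exceeding 'size'. For size = 0x600000 (6 MiB):
 *
 *	fls(0x600000) - 1 = 22, i.e. a 2^22 = 4 MiB window
 *
 * so a non-power-of-2 resource would appear to leave its tail
 * untranslated here; the v6.8 code above splits such a window across
 * several tables instead.
 */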
 247static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
 248{
 249	int i;
 250	u32 val;
 251
 252	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 253		struct mtk_msi_set *msi_set = &port->msi_sets[i];
 254
 255		msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
 256				i * PCIE_MSI_SET_OFFSET;
 257		msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
 258				    i * PCIE_MSI_SET_OFFSET;
 259
 260		/* Configure the MSI capture address */
 261		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
 262		writel_relaxed(upper_32_bits(msi_set->msg_addr),
 263			       port->base + PCIE_MSI_SET_ADDR_HI_BASE +
 264			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
 265	}
 266
 267	val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
 268	val |= PCIE_MSI_SET_ENABLE;
 269	writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
 270
 271	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 272	val |= PCIE_MSI_ENABLE;
 273	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 274}
 275
 276static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
 277{
 278	struct resource_entry *entry;
 279	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
 280	unsigned int table_index = 0;
 281	int err;
 282	u32 val;
 283
 284	/* Set as RC mode */
 285	val = readl_relaxed(port->base + PCIE_SETTING_REG);
 286	val |= PCIE_RC_MODE;
 287	writel_relaxed(val, port->base + PCIE_SETTING_REG);
 288
 289	/* Set class code */
 290	val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
 291	val &= ~GENMASK(31, 8);
 292	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
 293	writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
 294
 295	/* Mask all INTx interrupts */
 296	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 297	val &= ~PCIE_INTX_ENABLE;
 298	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 299
 300	/* Assert all reset signals */
 301	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
 302	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
 303	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
 304
 305	/*
  306	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
 307	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
 308	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
 309	 * for the power and clock to become stable.
 310	 */
 311	msleep(100);
 312
 313	/* De-assert reset signals */
 314	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
 315	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
 316
 317	/* Check if the link is up or not */
 318	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
 319				 !!(val & PCIE_PORT_LINKUP), 20,
 320				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
 321	if (err) {
 322		val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
 323		dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
 324		return err;
 325	}
 326
 327	mtk_pcie_enable_msi(port);
 328
 329	/* Set PCIe translation windows */
 330	resource_list_for_each_entry(entry, &host->windows) {
 331		struct resource *res = entry->res;
 332		unsigned long type = resource_type(res);
 333		resource_size_t cpu_addr;
 334		resource_size_t pci_addr;
 335		resource_size_t size;
 336		const char *range_type;
 337
 338		if (type == IORESOURCE_IO) {
 339			cpu_addr = pci_pio_to_address(res->start);
 340			range_type = "IO";
 341		} else if (type == IORESOURCE_MEM) {
 342			cpu_addr = res->start;
 343			range_type = "MEM";
 344		} else {
 345			continue;
 346		}
 347
 348		pci_addr = res->start - entry->offset;
 349		size = resource_size(res);
 350		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
 351					       type, table_index);
 352		if (err)
 353			return err;
 354
 355		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
 356			range_type, table_index, (unsigned long long)cpu_addr,
 357			(unsigned long long)pci_addr, (unsigned long long)size);
 358
 359		table_index++;
 360	}
 361
 362	return 0;
 363}
 364
 365static int mtk_pcie_set_affinity(struct irq_data *data,
 366				 const struct cpumask *mask, bool force)
 367{
 368	return -EINVAL;
 369}
 370
 371static void mtk_pcie_msi_irq_mask(struct irq_data *data)
 372{
 373	pci_msi_mask_irq(data);
 374	irq_chip_mask_parent(data);
 375}
 376
 377static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
 378{
 379	pci_msi_unmask_irq(data);
 380	irq_chip_unmask_parent(data);
 381}
 382
 383static struct irq_chip mtk_msi_irq_chip = {
 384	.irq_ack = irq_chip_ack_parent,
 385	.irq_mask = mtk_pcie_msi_irq_mask,
 386	.irq_unmask = mtk_pcie_msi_irq_unmask,
 387	.name = "MSI",
 388};
 389
 390static struct msi_domain_info mtk_msi_domain_info = {
 391	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 392		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
 393	.chip	= &mtk_msi_irq_chip,
 394};
 395
 396static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 397{
 398	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 399	struct mtk_pcie_port *port = data->domain->host_data;
 400	unsigned long hwirq;
 401
 402	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 403
 404	msg->address_hi = upper_32_bits(msi_set->msg_addr);
 405	msg->address_lo = lower_32_bits(msi_set->msg_addr);
 406	msg->data = hwirq;
 407	dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
 408		hwirq, msg->address_hi, msg->address_lo, msg->data);
 409}
 410
 411static void mtk_msi_bottom_irq_ack(struct irq_data *data)
 412{
 413	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 414	unsigned long hwirq;
 415
 416	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 417
 418	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
 419}
 420
 421static void mtk_msi_bottom_irq_mask(struct irq_data *data)
 422{
 423	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 424	struct mtk_pcie_port *port = data->domain->host_data;
 425	unsigned long hwirq, flags;
 426	u32 val;
 427
 428	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 429
 430	raw_spin_lock_irqsave(&port->irq_lock, flags);
 431	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 432	val &= ~BIT(hwirq);
 433	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 434	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 435}
 436
 437static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
 438{
 439	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 440	struct mtk_pcie_port *port = data->domain->host_data;
 441	unsigned long hwirq, flags;
 442	u32 val;
 443
 444	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
 445
 446	raw_spin_lock_irqsave(&port->irq_lock, flags);
 447	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 448	val |= BIT(hwirq);
 449	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 450	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 451}
 452
 453static struct irq_chip mtk_msi_bottom_irq_chip = {
 454	.irq_ack		= mtk_msi_bottom_irq_ack,
 455	.irq_mask		= mtk_msi_bottom_irq_mask,
 456	.irq_unmask		= mtk_msi_bottom_irq_unmask,
 457	.irq_compose_msi_msg	= mtk_compose_msi_msg,
 458	.irq_set_affinity	= mtk_pcie_set_affinity,
 459	.name			= "MSI",
 460};
 461
 462static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
 463				       unsigned int virq, unsigned int nr_irqs,
 464				       void *arg)
 465{
 466	struct mtk_pcie_port *port = domain->host_data;
 467	struct mtk_msi_set *msi_set;
 468	int i, hwirq, set_idx;
 469
 470	mutex_lock(&port->lock);
 471
 472	hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
 473					order_base_2(nr_irqs));
 474
 475	mutex_unlock(&port->lock);
 476
 477	if (hwirq < 0)
 478		return -ENOSPC;
 479
 480	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
 481	msi_set = &port->msi_sets[set_idx];
 482
 483	for (i = 0; i < nr_irqs; i++)
 484		irq_domain_set_info(domain, virq + i, hwirq + i,
 485				    &mtk_msi_bottom_irq_chip, msi_set,
 486				    handle_edge_irq, NULL, NULL);
 487
 488	return 0;
 489}
 490
 491static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
 492				       unsigned int virq, unsigned int nr_irqs)
 493{
 494	struct mtk_pcie_port *port = domain->host_data;
 495	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
 496
 497	mutex_lock(&port->lock);
 498
 499	bitmap_release_region(port->msi_irq_in_use, data->hwirq,
 500			      order_base_2(nr_irqs));
 501
 502	mutex_unlock(&port->lock);
 503
 504	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 505}
 506
 507static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
 508	.alloc = mtk_msi_bottom_domain_alloc,
 509	.free = mtk_msi_bottom_domain_free,
 510};
 511
 512static void mtk_intx_mask(struct irq_data *data)
 513{
 514	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 515	unsigned long flags;
 516	u32 val;
 517
 518	raw_spin_lock_irqsave(&port->irq_lock, flags);
 519	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 520	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
 521	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 522	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 523}
 524
 525static void mtk_intx_unmask(struct irq_data *data)
 526{
 527	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 528	unsigned long flags;
 529	u32 val;
 530
 531	raw_spin_lock_irqsave(&port->irq_lock, flags);
 532	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 533	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
 534	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 535	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 536}
 537
 538/**
 539 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 540 * @data: pointer to chip specific data
 541 *
 542 * As an emulated level IRQ, its interrupt status will remain
  543 * until the corresponding de-assert message is received; hence
 544 * the status can only be cleared when the interrupt has been serviced.
 545 */
 546static void mtk_intx_eoi(struct irq_data *data)
 547{
 548	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 549	unsigned long hwirq;
 550
 551	hwirq = data->hwirq + PCIE_INTX_SHIFT;
 552	writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
 553}
 554
 555static struct irq_chip mtk_intx_irq_chip = {
 556	.irq_mask		= mtk_intx_mask,
 557	.irq_unmask		= mtk_intx_unmask,
 558	.irq_eoi		= mtk_intx_eoi,
 559	.irq_set_affinity	= mtk_pcie_set_affinity,
 560	.name			= "INTx",
 561};
 562
 563static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 564			     irq_hw_number_t hwirq)
 565{
 566	irq_set_chip_data(irq, domain->host_data);
 567	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
 568				      handle_fasteoi_irq, "INTx");
 569	return 0;
 570}
 571
 572static const struct irq_domain_ops intx_domain_ops = {
 573	.map = mtk_pcie_intx_map,
 574};
 575
 576static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
 577{
 578	struct device *dev = port->dev;
 579	struct device_node *intc_node, *node = dev->of_node;
 580	int ret;
 581
 582	raw_spin_lock_init(&port->irq_lock);
 583
 584	/* Setup INTx */
 585	intc_node = of_get_child_by_name(node, "interrupt-controller");
 586	if (!intc_node) {
 587		dev_err(dev, "missing interrupt-controller node\n");
 588		return -ENODEV;
 589	}
 590
 591	port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
 592						  &intx_domain_ops, port);
 593	if (!port->intx_domain) {
 594		dev_err(dev, "failed to create INTx IRQ domain\n");
 595		return -ENODEV;
 596	}
 597
 598	/* Setup MSI */
 599	mutex_init(&port->lock);
 600
 601	port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
 602				  &mtk_msi_bottom_domain_ops, port);
 603	if (!port->msi_bottom_domain) {
 604		dev_err(dev, "failed to create MSI bottom domain\n");
 605		ret = -ENODEV;
 606		goto err_msi_bottom_domain;
 607	}
 608
 609	port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
 610						     &mtk_msi_domain_info,
 611						     port->msi_bottom_domain);
 612	if (!port->msi_domain) {
 613		dev_err(dev, "failed to create MSI domain\n");
 614		ret = -ENODEV;
 615		goto err_msi_domain;
 616	}
 617
 618	return 0;
 619
 620err_msi_domain:
 621	irq_domain_remove(port->msi_bottom_domain);
 622err_msi_bottom_domain:
 623	irq_domain_remove(port->intx_domain);
 624
 625	return ret;
 626}
 627
 628static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
 629{
 630	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
 631
 632	if (port->intx_domain)
 633		irq_domain_remove(port->intx_domain);
 634
 635	if (port->msi_domain)
 636		irq_domain_remove(port->msi_domain);
 637
 638	if (port->msi_bottom_domain)
 639		irq_domain_remove(port->msi_bottom_domain);
 640
 641	irq_dispose_mapping(port->irq);
 642}
 643
 644static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
 645{
 646	struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
 647	unsigned long msi_enable, msi_status;
 648	unsigned int virq;
 649	irq_hw_number_t bit, hwirq;
 650
 651	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 652
 653	do {
 654		msi_status = readl_relaxed(msi_set->base +
 655					   PCIE_MSI_SET_STATUS_OFFSET);
 656		msi_status &= msi_enable;
 657		if (!msi_status)
 658			break;
 659
 660		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
 661			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
 662			virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
 663			generic_handle_irq(virq);
 664		}
 665	} while (true);
 666}
 667
 668static void mtk_pcie_irq_handler(struct irq_desc *desc)
 669{
 670	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
 671	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 672	unsigned long status;
 673	unsigned int virq;
 674	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
 675
 676	chained_irq_enter(irqchip, desc);
 677
 678	status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
 679	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
 680			      PCIE_INTX_SHIFT) {
 681		virq = irq_find_mapping(port->intx_domain,
 682					irq_bit - PCIE_INTX_SHIFT);
 683		generic_handle_irq(virq);
 684	}
 685
 686	irq_bit = PCIE_MSI_SHIFT;
 687	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
 688			      PCIE_MSI_SHIFT) {
 689		mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
 690
 691		writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
 692	}
 693
 694	chained_irq_exit(irqchip, desc);
 695}
 696
 697static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
 698{
 699	struct device *dev = port->dev;
 700	struct platform_device *pdev = to_platform_device(dev);
 701	int err;
 702
 703	err = mtk_pcie_init_irq_domains(port);
 704	if (err)
 705		return err;
 706
 707	port->irq = platform_get_irq(pdev, 0);
 708	if (port->irq < 0)
 709		return port->irq;
 710
 711	irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
 712
 713	return 0;
 714}
 715
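/*
 * The resets and the PHY below are all optional: the *_optional_*
 * getters return NULL when the devicetree does not describe them, and
 * the reset_control_*() and phy_*() helpers accept a NULL handle as a
 * no-op, so the power-up path works unchanged on SoCs that lack them.
 */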
 716static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
 717{
 718	struct device *dev = port->dev;
 719	struct platform_device *pdev = to_platform_device(dev);
 720	struct resource *regs;
 721	int ret;
 722
 723	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
 724	if (!regs)
 725		return -EINVAL;
 726	port->base = devm_ioremap_resource(dev, regs);
 727	if (IS_ERR(port->base)) {
 728		dev_err(dev, "failed to map register base\n");
 729		return PTR_ERR(port->base);
 730	}
 731
 732	port->reg_base = regs->start;
 733
 734	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
 735	if (IS_ERR(port->phy_reset)) {
 736		ret = PTR_ERR(port->phy_reset);
 737		if (ret != -EPROBE_DEFER)
 738			dev_err(dev, "failed to get PHY reset\n");
 739
 740		return ret;
 741	}
 742
 743	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
 744	if (IS_ERR(port->mac_reset)) {
 745		ret = PTR_ERR(port->mac_reset);
 746		if (ret != -EPROBE_DEFER)
 747			dev_err(dev, "failed to get MAC reset\n");
 748
 749		return ret;
 750	}
 751
 752	port->phy = devm_phy_optional_get(dev, "pcie-phy");
 753	if (IS_ERR(port->phy)) {
 754		ret = PTR_ERR(port->phy);
 755		if (ret != -EPROBE_DEFER)
 756			dev_err(dev, "failed to get PHY\n");
 757
 758		return ret;
 759	}
 760
 761	port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
 762	if (port->num_clks < 0) {
 763		dev_err(dev, "failed to get clocks\n");
 764		return port->num_clks;
 765	}
 766
 767	return 0;
 768}
 769
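/*
 * Power-up follows the dependency chain of the hardware: take the PHY
 * out of reset and power it on (enabling the pipe clock) before the
 * MAC leaves reset, switch the power domain on through runtime PM, and
 * only then enable the bus clocks.  The error labels unwind the exact
 * reverse of this sequence.
 */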
 770static int mtk_pcie_power_up(struct mtk_pcie_port *port)
 771{
 772	struct device *dev = port->dev;
 773	int err;
 774
 775	/* PHY power on and enable pipe clock */
 776	reset_control_deassert(port->phy_reset);
 777
 778	err = phy_init(port->phy);
 779	if (err) {
 780		dev_err(dev, "failed to initialize PHY\n");
 781		goto err_phy_init;
 782	}
 783
 784	err = phy_power_on(port->phy);
 785	if (err) {
 786		dev_err(dev, "failed to power on PHY\n");
 787		goto err_phy_on;
 788	}
 789
 790	/* MAC power on and enable transaction layer clocks */
 791	reset_control_deassert(port->mac_reset);
 792
 793	pm_runtime_enable(dev);
 794	pm_runtime_get_sync(dev);
 795
 796	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
 797	if (err) {
 798		dev_err(dev, "failed to enable clocks\n");
 799		goto err_clk_init;
 800	}
 801
 802	return 0;
 803
 804err_clk_init:
 805	pm_runtime_put_sync(dev);
 806	pm_runtime_disable(dev);
 807	reset_control_assert(port->mac_reset);
 808	phy_power_off(port->phy);
 809err_phy_on:
 810	phy_exit(port->phy);
 811err_phy_init:
 812	reset_control_assert(port->phy_reset);
 813
 814	return err;
 815}
 816
 817static void mtk_pcie_power_down(struct mtk_pcie_port *port)
 818{
 819	clk_bulk_disable_unprepare(port->num_clks, port->clks);
 820
 821	pm_runtime_put_sync(port->dev);
 822	pm_runtime_disable(port->dev);
 823	reset_control_assert(port->mac_reset);
 824
 825	phy_power_off(port->phy);
 826	phy_exit(port->phy);
 827	reset_control_assert(port->phy_reset);
 828}
 829
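/*
 * Probe-time bring-up: parse the devicetree resources, power the port
 * up, train the link, and only then install the interrupt handling.
 * mtk_pcie_parse_port() takes only devm-managed resources, so an error
 * after it needs no explicit unwinding here.
 */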
 830static int mtk_pcie_setup(struct mtk_pcie_port *port)
 831{
 832	int err;
 833
 834	err = mtk_pcie_parse_port(port);
 835	if (err)
 836		return err;
 837
 838	/* Don't touch the hardware registers before power up */
 839	err = mtk_pcie_power_up(port);
 840	if (err)
 841		return err;
 842
  843	/* Try to bring the link up */
 844	err = mtk_pcie_startup_port(port);
 845	if (err)
 846		goto err_setup;
 847
 848	err = mtk_pcie_setup_irq(port);
 849	if (err)
 850		goto err_setup;
 851
 852	return 0;
 853
 854err_setup:
 855	mtk_pcie_power_down(port);
 856
 857	return err;
 858}
 859
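/*
 * The driver-private mtk_pcie_port lives in the tail of the host
 * bridge allocation: pci_host_bridge_priv() returns that tail, and
 * pci_host_bridge_from_priv() in the remove path maps it back to the
 * enclosing bridge.
 */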
 860static int mtk_pcie_probe(struct platform_device *pdev)
 861{
 862	struct device *dev = &pdev->dev;
 863	struct mtk_pcie_port *port;
 864	struct pci_host_bridge *host;
 865	int err;
 866
 867	host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
 868	if (!host)
 869		return -ENOMEM;
 870
 871	port = pci_host_bridge_priv(host);
 872
 873	port->dev = dev;
 874	platform_set_drvdata(pdev, port);
 875
 876	err = mtk_pcie_setup(port);
 877	if (err)
 878		return err;
 879
 880	host->ops = &mtk_pcie_ops;
 881	host->sysdata = port;
 882
 883	err = pci_host_probe(host);
 884	if (err) {
 885		mtk_pcie_irq_teardown(port);
 886		mtk_pcie_power_down(port);
 887		return err;
 888	}
 889
 890	return 0;
 891}
 892
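/*
 * Stop and remove the root bus under the rescan/remove lock so removal
 * cannot race with a concurrent rescan, then undo the IRQ and power
 * setup performed at probe time.
 */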
 893static int mtk_pcie_remove(struct platform_device *pdev)
 894{
 895	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
 896	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
 897
 898	pci_lock_rescan_remove();
 899	pci_stop_root_bus(host->bus);
 900	pci_remove_root_bus(host->bus);
 901	pci_unlock_rescan_remove();
 902
 903	mtk_pcie_irq_teardown(port);
 904	mtk_pcie_power_down(port);
 905
 906	return 0;
 907}
 908
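/*
 * Suspend powers the MAC down, so the interrupt enable state cannot
 * survive in hardware: snapshot the global enable word and the per-set
 * MSI enable words here, and replay them in mtk_pcie_irq_restore()
 * once resume has re-trained the link.
 */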
 909static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
 910{
 911	int i;
 912
 913	raw_spin_lock(&port->irq_lock);
 914
 915	port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 916
 917	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 918		struct mtk_msi_set *msi_set = &port->msi_sets[i];
 919
 920		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
 921					   PCIE_MSI_SET_ENABLE_OFFSET);
 922	}
 923
 924	raw_spin_unlock(&port->irq_lock);
 925}
 926
 927static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
 928{
 929	int i;
 930
 931	raw_spin_lock(&port->irq_lock);
 932
 933	writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
 934
 935	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 936		struct mtk_msi_set *msi_set = &port->msi_sets[i];
 937
 938		writel_relaxed(msi_set->saved_irq_state,
 939			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 940	}
 941
 942	raw_spin_unlock(&port->irq_lock);
 943}
 944
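/*
 * Request the MAC to turn the link off, then poll the LTSSM (every
 * 20 us, for up to 50 ms) until it reports the L2 idle state.
 */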
 945static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
 946{
 947	u32 val;
 948
 949	val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
 950	val |= PCIE_TURN_OFF_LINK;
 951	writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
 952
  953	/* Check that the link is in the L2 state */
 954	return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
 955				  (PCIE_LTSSM_STATE(val) ==
 956				   PCIE_LTSSM_STATE_L2_IDLE), 20,
 957				   50 * USEC_PER_MSEC);
 958}
 959
 960static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
 961{
 962	struct mtk_pcie_port *port = dev_get_drvdata(dev);
 963	int err;
 964	u32 val;
 965
 966	/* Trigger link to L2 state */
 967	err = mtk_pcie_turn_off_link(port);
 968	if (err) {
 969		dev_err(port->dev, "cannot enter L2 state\n");
 970		return err;
 971	}
 972
 973	/* Pull down the PERST# pin */
 974	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
 975	val |= PCIE_PE_RSTB;
 976	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
 977
  978	dev_dbg(port->dev, "entered L2 state successfully\n");
 979
 980	mtk_pcie_irq_save(port);
 981	mtk_pcie_power_down(port);
 982
 983	return 0;
 984}
 985
 986static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
 987{
 988	struct mtk_pcie_port *port = dev_get_drvdata(dev);
 989	int err;
 990
 991	err = mtk_pcie_power_up(port);
 992	if (err)
 993		return err;
 994
 995	err = mtk_pcie_startup_port(port);
 996	if (err) {
 997		mtk_pcie_power_down(port);
 998		return err;
 999	}
1000
1001	mtk_pcie_irq_restore(port);
1002
1003	return 0;
1004}
1005
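/*
 * Run the hooks in the noirq phase so the link is quiesced only after
 * every downstream device has suspended, and is back up before any of
 * them resumes.
 */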
1006static const struct dev_pm_ops mtk_pcie_pm_ops = {
1007	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
1008				      mtk_pcie_resume_noirq)
1009};
1010
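/*
 * An illustrative (and deliberately incomplete) node for this driver;
 * the register address below is made up, and the authoritative list of
 * properties is the mediatek,mt8192-pcie devicetree binding:
 *
 *	pcie@11230000 {
 *		compatible = "mediatek,mt8192-pcie";
 *		reg = <0x11230000 0x4000>;
 *		reg-names = "pcie-mac";
 *		reset-names = "phy", "mac";
 *		phy-names = "pcie-phy";
 *
 *		interrupt-controller {
 *			interrupt-controller;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 *
 * Any 'clocks' the SoC needs can be listed without clock-names, since
 * the driver fetches them with devm_clk_bulk_get_all().
 */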
1011static const struct of_device_id mtk_pcie_of_match[] = {
1012	{ .compatible = "mediatek,mt8192-pcie" },
1013	{},
1014};
1015MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1016
1017static struct platform_driver mtk_pcie_driver = {
1018	.probe = mtk_pcie_probe,
1019	.remove = mtk_pcie_remove,
1020	.driver = {
1021		.name = "mtk-pcie",
1022		.of_match_table = mtk_pcie_of_match,
1023		.pm = &mtk_pcie_pm_ops,
1024	},
1025};
1026
1027module_platform_driver(mtk_pcie_driver);
1028MODULE_LICENSE("GPL v2");