Linux v6.13.7 - drivers/pci/controller/dwc/pci-keystone.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe host controller driver for Texas Instruments Keystone SoCs
   4 *
   5 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
   6 *		https://www.ti.com
   7 *
   8 * Author: Murali Karicheri <m-karicheri2@ti.com>
   9 * Implementation based on pci-exynos.c and pcie-designware.c
  10 */
  11
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/gpio/consumer.h>
  15#include <linux/init.h>
  16#include <linux/interrupt.h>
  17#include <linux/irqchip/chained_irq.h>
  18#include <linux/irqdomain.h>
  19#include <linux/mfd/syscon.h>
  20#include <linux/msi.h>
  21#include <linux/of.h>
  22#include <linux/of_irq.h>
  23#include <linux/of_pci.h>
  24#include <linux/phy/phy.h>
  25#include <linux/platform_device.h>
  26#include <linux/regmap.h>
  27#include <linux/resource.h>
  28#include <linux/signal.h>
  29
  30#include "../../pci.h"
  31#include "pcie-designware.h"
  32
  33#define PCIE_VENDORID_MASK	0xffff
  34#define PCIE_DEVICEID_SHIFT	16
  35
  36/* Application registers */
  37#define PID				0x000
  38#define RTL				GENMASK(15, 11)
  39#define RTL_SHIFT			11
  40#define AM6_PCI_PG1_RTL_VER		0x15
  41
  42#define CMD_STATUS			0x004
  43#define LTSSM_EN_VAL		        BIT(0)
  44#define OB_XLAT_EN_VAL		        BIT(1)
  45#define DBI_CS2				BIT(5)
  46
  47#define CFG_SETUP			0x008
  48#define CFG_BUS(x)			(((x) & 0xff) << 16)
  49#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
  50#define CFG_FUNC(x)			((x) & 0x7)
  51#define CFG_TYPE1			BIT(24)
  52
  53#define OB_SIZE				0x030
  54#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
  55#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
  56#define OB_ENABLEN			BIT(0)
  57#define OB_WIN_SIZE			8	/* 8MB */
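/*
 * Each outbound translation window spans OB_WIN_SIZE MB of PCI memory
 * space: OB_SIZE is programmed with ilog2(OB_WIN_SIZE) and each
 * OB_OFFSET_INDEX(n)/OB_OFFSET_HI(n) pair holds window n's 64-bit base
 * (see ks_pcie_setup_rc_app_regs()).
 */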
  58
  59#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
  60#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
  61#define PCIE_EP_IRQ_SET			0x64
  62#define PCIE_EP_IRQ_CLR			0x68
  63#define INT_ENABLE			BIT(0)
  64
  65/* IRQ register defines */
  66#define IRQ_EOI				0x050
  67
  68#define MSI_IRQ				0x054
  69#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
  70#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
  71#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
  72#define MSI_IRQ_OFFSET			4
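/*
 * MSI vectors are spread across eight status/enable register banks:
 * vector v is reported in MSI_IRQ_STATUS(v % 8) at bit (v / 8), so
 * e.g. vector 11 lives in bank 3, bit 1 (see ks_pcie_msi_irq_ack()).
 */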
  73
  74#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
  75#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
  76#define INTx_EN				BIT(0)
  77
  78#define ERR_IRQ_STATUS			0x1c4
  79#define ERR_IRQ_ENABLE_SET		0x1c8
  80#define ERR_AER				BIT(5)	/* ECRC error */
  81#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
  82#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
  83#define ERR_CORR			BIT(3)	/* Correctable error */
  84#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
  85#define ERR_FATAL			BIT(1)	/* Fatal error */
  86#define ERR_SYS				BIT(0)	/* System error */
  87#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
  88					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
  89
  90/* PCIE controller device IDs */
  91#define PCIE_RC_K2HK			0xb008
  92#define PCIE_RC_K2E			0xb009
  93#define PCIE_RC_K2L			0xb00a
  94#define PCIE_RC_K2G			0xb00b
  95
  96#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
  97#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)
  98
  99#define EP				0x0
 100#define LEG_EP				0x1
 101#define RC				0x2
 102
 103#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)
 104
 105#define AM654_PCIE_DEV_TYPE_MASK	0x3
 106#define AM654_WIN_SIZE			SZ_64K
 107
 108#define APP_ADDR_SPACE_0		(16 * SZ_1K)
 109
 110#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
 111
 112#define PCI_DEVICE_ID_TI_AM654X		0xb00c
 113
 114struct ks_pcie_of_data {
 115	enum dw_pcie_device_mode mode;
 116	const struct dw_pcie_host_ops *host_ops;
 117	const struct dw_pcie_ep_ops *ep_ops;
 118	u32 version;
 119};
 120
 121struct keystone_pcie {
 122	struct dw_pcie		*pci;
 123	/* PCI Device ID */
 124	u32			device_id;
 125	int			intx_host_irqs[PCI_NUM_INTX];
 126
 127	int			msi_host_irq;
 128	int			num_lanes;
 129	u32			num_viewport;
 130	struct phy		**phy;
 131	struct device_link	**link;
 132	struct			device_node *msi_intc_np;
 133	struct irq_domain	*intx_irq_domain;
 134	struct device_node	*np;
 135
 136	/* Application register space */
 137	void __iomem		*va_app_base;	/* DT 1st resource */
 138	struct resource		app;
 139	bool			is_am6;
 140};
 141
 142static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
 143{
 144	return readl(ks_pcie->va_app_base + offset);
 145}
 146
 147static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
 148			       u32 val)
 149{
 150	writel(val, ks_pcie->va_app_base + offset);
 151}
 152
 153static void ks_pcie_msi_irq_ack(struct irq_data *data)
 154{
 155	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(data);
 156	struct keystone_pcie *ks_pcie;
 157	u32 irq = data->hwirq;
 158	struct dw_pcie *pci;
 159	u32 reg_offset;
 160	u32 bit_pos;
 161
 162	pci = to_dw_pcie_from_pp(pp);
 163	ks_pcie = to_keystone_pcie(pci);
 164
 165	reg_offset = irq % 8;
 166	bit_pos = irq >> 3;
 167
 168	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
 169			   BIT(bit_pos));
 170	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
 171}
 172
 173static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 174{
 175	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 176	struct keystone_pcie *ks_pcie;
 177	struct dw_pcie *pci;
 178	u64 msi_target;
 179
 180	pci = to_dw_pcie_from_pp(pp);
 181	ks_pcie = to_keystone_pcie(pci);
 182
 183	msi_target = ks_pcie->app.start + MSI_IRQ;
 184	msg->address_lo = lower_32_bits(msi_target);
 185	msg->address_hi = upper_32_bits(msi_target);
 186	msg->data = data->hwirq;
 187
 188	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
 189		(int)data->hwirq, msg->address_hi, msg->address_lo);
 190}
 191
 192static void ks_pcie_msi_mask(struct irq_data *data)
 193{
 194	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 195	struct keystone_pcie *ks_pcie;
 196	u32 irq = data->hwirq;
 197	struct dw_pcie *pci;
 198	unsigned long flags;
 199	u32 reg_offset;
 200	u32 bit_pos;
 201
 202	raw_spin_lock_irqsave(&pp->lock, flags);
 203
 204	pci = to_dw_pcie_from_pp(pp);
 205	ks_pcie = to_keystone_pcie(pci);
 206
 207	reg_offset = irq % 8;
 208	bit_pos = irq >> 3;
 209
 210	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
 211			   BIT(bit_pos));
 212
 213	raw_spin_unlock_irqrestore(&pp->lock, flags);
 214}
 215
 216static void ks_pcie_msi_unmask(struct irq_data *data)
 217{
 218	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 219	struct keystone_pcie *ks_pcie;
 220	u32 irq = data->hwirq;
 221	struct dw_pcie *pci;
 222	unsigned long flags;
 223	u32 reg_offset;
 224	u32 bit_pos;
 225
 226	raw_spin_lock_irqsave(&pp->lock, flags);
 227
 228	pci = to_dw_pcie_from_pp(pp);
 229	ks_pcie = to_keystone_pcie(pci);
 230
 231	reg_offset = irq % 8;
 232	bit_pos = irq >> 3;
 233
 234	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
 235			   BIT(bit_pos));
 236
 237	raw_spin_unlock_irqrestore(&pp->lock, flags);
 238}
 239
 240static struct irq_chip ks_pcie_msi_irq_chip = {
 241	.name = "KEYSTONE-PCI-MSI",
 242	.irq_ack = ks_pcie_msi_irq_ack,
 243	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
 244	.irq_mask = ks_pcie_msi_mask,
 245	.irq_unmask = ks_pcie_msi_unmask,
 246};
 247
 248/**
 249 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 250 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 251 *	     PCIe host controller driver information.
 252 *
 253 * Since modification of dbi_cs2 involves different clock domain, read the
 254 * status back to ensure the transition is complete.
 255 */
 256static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
 257{
 258	u32 val;
 259
 260	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 261	val |= DBI_CS2;
 262	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 263
 264	do {
 265		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 266	} while (!(val & DBI_CS2));
 267}
 268
 269/**
 270 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 271 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 272 *	     PCIe host controller driver information.
 273 *
 274 * Since modification of dbi_cs2 involves different clock domain, read the
 275 * status back to ensure the transition is complete.
 276 */
 277static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 278{
 279	u32 val;
 280
 281	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 282	val &= ~DBI_CS2;
 283	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 284
 285	do {
 286		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 287	} while (val & DBI_CS2);
 288}
 289
 290static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
 291{
 292	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 293	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 294
 295	/* Configure and set up BAR0 */
 296	ks_pcie_set_dbi_mode(ks_pcie);
 297
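	/*
	 * With DBI_CS2 set, writes to PCI_BASE_ADDRESS_0 land in the
	 * overlaid BAR0 mask register: bit 0 enables the BAR, and the
	 * remaining mask bits size its aperture (4K here).
	 */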
 298	/* Enable BAR0 */
 299	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
 300	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
 301
 302	ks_pcie_clear_dbi_mode(ks_pcie);
 303
 304	/*
 305	 * For BAR0, just setting bus address for inbound writes (MSI) should
 306	 * be sufficient.  Use physical address to avoid any conflicts.
 307	 */
 308	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
 309
 310	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
 311	return dw_pcie_allocate_domains(pp);
 312}
 313
 314static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
 315				    int offset)
 316{
 317	struct dw_pcie *pci = ks_pcie->pci;
 318	struct device *dev = pci->dev;
 319	u32 pending;
 320
 321	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
 322
 323	if (BIT(0) & pending) {
  324		dev_dbg(dev, ": irq: irq_offset %d\n", offset);
 325		generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
 326	}
 327
 328	/* EOI the INTx interrupt */
 329	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
 330}
 331
 332static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 333{
 334	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
 335}
 336
 337static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 338{
 339	u32 reg;
 340	struct device *dev = ks_pcie->pci->dev;
 341
 342	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
 343	if (!reg)
 344		return IRQ_NONE;
 345
 346	if (reg & ERR_SYS)
 347		dev_err(dev, "System Error\n");
 348
 349	if (reg & ERR_FATAL)
 350		dev_err(dev, "Fatal Error\n");
 351
 352	if (reg & ERR_NONFATAL)
 353		dev_dbg(dev, "Non Fatal Error\n");
 354
 355	if (reg & ERR_CORR)
 356		dev_dbg(dev, "Correctable Error\n");
 357
 358	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
 359		dev_err(dev, "AXI tag lookup fatal Error\n");
 360
 361	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
 362		dev_err(dev, "ECRC Error\n");
 363
 364	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
 365
 366	return IRQ_HANDLED;
 367}
 368
 369static void ks_pcie_ack_intx_irq(struct irq_data *d)
 370{
 371}
 372
 373static void ks_pcie_mask_intx_irq(struct irq_data *d)
 374{
 375}
 376
 377static void ks_pcie_unmask_intx_irq(struct irq_data *d)
 378{
 379}
 380
 381static struct irq_chip ks_pcie_intx_irq_chip = {
 382	.name = "Keystone-PCI-INTX-IRQ",
 383	.irq_ack = ks_pcie_ack_intx_irq,
 384	.irq_mask = ks_pcie_mask_intx_irq,
 385	.irq_unmask = ks_pcie_unmask_intx_irq,
 386};
 387
 388static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
 389				     unsigned int irq, irq_hw_number_t hw_irq)
 390{
 391	irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
 392				 handle_level_irq);
 393	irq_set_chip_data(irq, d->host_data);
 394
 395	return 0;
 396}
 397
 398static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
 399	.map = ks_pcie_init_intx_irq_map,
 400	.xlate = irq_domain_xlate_onetwocell,
 401};
 402
 403static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 404{
 405	u32 val;
 406	u32 num_viewport = ks_pcie->num_viewport;
 407	struct dw_pcie *pci = ks_pcie->pci;
 408	struct dw_pcie_rp *pp = &pci->pp;
 409	struct resource_entry *entry;
 410	struct resource *mem;
 411	u64 start, end;
 412	int i;
 413
 414	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
 415	if (!entry)
 416		return -ENODEV;
 417
 418	mem = entry->res;
 419	start = mem->start;
 420	end = mem->end;
 421
 422	/* Disable BARs for inbound access */
 423	ks_pcie_set_dbi_mode(ks_pcie);
 424	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
 425	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
 426	ks_pcie_clear_dbi_mode(ks_pcie);
 427
 428	if (ks_pcie->is_am6)
 429		return 0;
 430
 431	val = ilog2(OB_WIN_SIZE);
 432	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
 433
 434	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
 435	for (i = 0; i < num_viewport && (start < end); i++) {
 436		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
 437				   lower_32_bits(start) | OB_ENABLEN);
 438		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
 439				   upper_32_bits(start));
 440		start += OB_WIN_SIZE * SZ_1M;
 441	}
 442
 443	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 444	val |= OB_XLAT_EN_VAL;
 445	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 446
 447	return 0;
 448}
 449
 450static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
 451					   unsigned int devfn, int where)
 452{
 453	struct dw_pcie_rp *pp = bus->sysdata;
 454	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 455	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 456	u32 reg;
 457
 458	/*
 459	 * Checking whether the link is up here is a last line of defense
 460	 * against platforms that forward errors on the system bus as
 461	 * SError upon PCI configuration transactions issued when the link
 462	 * is down. This check is racy by definition and does not stop
 463	 * the system from triggering an SError if the link goes down
 464	 * after this check is performed.
 465	 */
 466	if (!dw_pcie_link_up(pci))
 467		return NULL;
 468
 469	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
 470		CFG_FUNC(PCI_FUNC(devfn));
 471	if (!pci_is_root_bus(bus->parent))
 472		reg |= CFG_TYPE1;
 473	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
 474
 475	return pp->va_cfg0_base + where;
 476}
 477
 478static struct pci_ops ks_child_pcie_ops = {
 479	.map_bus = ks_pcie_other_map_bus,
 480	.read = pci_generic_config_read,
 481	.write = pci_generic_config_write,
 482};
 483
 484static struct pci_ops ks_pcie_ops = {
 485	.map_bus = dw_pcie_own_conf_map_bus,
 486	.read = pci_generic_config_read,
 487	.write = pci_generic_config_write,
 488};
 489
 490/**
 491 * ks_pcie_link_up() - Check if link up
 492 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
 493 *	 controller driver information.
 494 */
 495static int ks_pcie_link_up(struct dw_pcie *pci)
 496{
 497	u32 val;
 498
 499	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
 500	val &= PORT_LOGIC_LTSSM_STATE_MASK;
 501	return (val == PORT_LOGIC_LTSSM_STATE_L0);
 502}
 503
 504static void ks_pcie_stop_link(struct dw_pcie *pci)
 505{
 506	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 507	u32 val;
 508
 509	/* Disable Link training */
 510	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 511	val &= ~LTSSM_EN_VAL;
 512	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 513}
 514
 515static int ks_pcie_start_link(struct dw_pcie *pci)
 516{
 517	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 518	u32 val;
 519
 520	/* Initiate Link Training */
 521	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 522	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 523
 524	return 0;
 525}
 526
 527static void ks_pcie_quirk(struct pci_dev *dev)
 528{
 529	struct pci_bus *bus = dev->bus;
 530	struct keystone_pcie *ks_pcie;
 531	struct device *bridge_dev;
 532	struct pci_dev *bridge;
 533	u32 val;
 534
 535	static const struct pci_device_id rc_pci_devids[] = {
 536		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
 537		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 538		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
 539		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 540		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
 541		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 542		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
 543		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 544		{ 0, },
 545	};
 546	static const struct pci_device_id am6_pci_devids[] = {
 547		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
 548		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
 549		{ 0, },
 550	};
 551
 552	if (pci_is_root_bus(bus))
 553		bridge = dev;
 554
 555	/* look for the host bridge */
 556	while (!pci_is_root_bus(bus)) {
 557		bridge = bus->self;
 558		bus = bus->parent;
 559	}
 560
 561	if (!bridge)
 562		return;
 563
 564	/*
 565	 * Keystone PCI controller has a h/w limitation of
 566	 * 256 bytes maximum read request size.  It can't handle
 567	 * anything higher than this.  So force this limit on
 568	 * all downstream devices.
 569	 */
 570	if (pci_match_id(rc_pci_devids, bridge)) {
 571		if (pcie_get_readrq(dev) > 256) {
 572			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
 573			pcie_set_readrq(dev, 256);
 574		}
 575	}
 576
 577	/*
 578	 * Memory transactions fail with PCI controller in AM654 PG1.0
 579	 * when MRRS is set to more than 128 bytes. Force the MRRS to
 580	 * 128 bytes in all downstream devices.
 581	 */
 582	if (pci_match_id(am6_pci_devids, bridge)) {
 583		bridge_dev = pci_get_host_bridge_device(dev);
 584		if (!bridge_dev || !bridge_dev->parent)
 585			return;
 586
 587		ks_pcie = dev_get_drvdata(bridge_dev->parent);
 588		if (!ks_pcie)
 589			return;
 590
 591		val = ks_pcie_app_readl(ks_pcie, PID);
 592		val &= RTL;
 593		val >>= RTL_SHIFT;
 594		if (val != AM6_PCI_PG1_RTL_VER)
 595			return;
 596
 597		if (pcie_get_readrq(dev) > 128) {
 598			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
 599			pcie_set_readrq(dev, 128);
 600		}
 601	}
 602}
 603DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
 604
 605static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 606{
 607	unsigned int irq = desc->irq_data.hwirq;
 608	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 609	u32 offset = irq - ks_pcie->msi_host_irq;
 610	struct dw_pcie *pci = ks_pcie->pci;
 611	struct dw_pcie_rp *pp = &pci->pp;
 612	struct device *dev = pci->dev;
 613	struct irq_chip *chip = irq_desc_get_chip(desc);
 614	u32 vector, reg, pos;
 615
 616	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
 617
 618	/*
  619	 * The chained IRQ handler installation replaced the normal
  620	 * interrupt driver handler, so we need to take care of the
  621	 * mask/unmask and ack operations ourselves.
 622	 */
 623	chained_irq_enter(chip, desc);
 624
 625	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
 626	/*
  627	 * MSI0 status bits 0-3 report vectors 0, 8, 16 and 24; MSI1 status
  628	 * bits report vectors 1, 9, 17 and 25; and so forth.
 629	 */
 630	for (pos = 0; pos < 4; pos++) {
 631		if (!(reg & BIT(pos)))
 632			continue;
 633
 634		vector = offset + (pos << 3);
 635		dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
 636		generic_handle_domain_irq(pp->irq_domain, vector);
 637	}
 638
 639	chained_irq_exit(chip, desc);
 640}
 641
 642/**
 643 * ks_pcie_intx_irq_handler() - Handle INTX interrupt
 644 * @desc: Pointer to irq descriptor
 645 *
 646 * Traverse through pending INTX interrupts and invoke handler for each. Also
 647 * takes care of interrupt controller level mask/ack operation.
 648 */
 649static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
 650{
 651	unsigned int irq = irq_desc_get_irq(desc);
 652	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 653	struct dw_pcie *pci = ks_pcie->pci;
 654	struct device *dev = pci->dev;
 655	u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
 656	struct irq_chip *chip = irq_desc_get_chip(desc);
 657
 658	dev_dbg(dev, ": Handling INTX irq %d\n", irq);
 659
 660	/*
  661	 * The chained IRQ handler installation replaced the normal
  662	 * interrupt driver handler, so we need to take care of the
  663	 * mask/unmask and ack operations ourselves.
 664	 */
 665	chained_irq_enter(chip, desc);
 666	ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
 667	chained_irq_exit(chip, desc);
 668}
 669
 670static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
 671{
 672	struct device *dev = ks_pcie->pci->dev;
 673	struct device_node *np = ks_pcie->np;
 674	struct device_node *intc_np;
 675	struct irq_data *irq_data;
 676	int irq_count, irq, ret, i;
 677
 678	if (!IS_ENABLED(CONFIG_PCI_MSI))
 679		return 0;
 680
 681	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
 682	if (!intc_np) {
 683		if (ks_pcie->is_am6)
 684			return 0;
 685		dev_warn(dev, "msi-interrupt-controller node is absent\n");
 686		return -EINVAL;
 687	}
 688
 689	irq_count = of_irq_count(intc_np);
 690	if (!irq_count) {
 691		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
 692		ret = -EINVAL;
 693		goto err;
 694	}
 695
 696	for (i = 0; i < irq_count; i++) {
 697		irq = irq_of_parse_and_map(intc_np, i);
 698		if (!irq) {
 699			ret = -EINVAL;
 700			goto err;
 701		}
 702
 703		if (!ks_pcie->msi_host_irq) {
 704			irq_data = irq_get_irq_data(irq);
 705			if (!irq_data) {
 706				ret = -EINVAL;
 707				goto err;
 708			}
 709			ks_pcie->msi_host_irq = irq_data->hwirq;
 710		}
 711
 712		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
 713						 ks_pcie);
 714	}
 715
 716	of_node_put(intc_np);
 717	return 0;
 718
 719err:
 720	of_node_put(intc_np);
 721	return ret;
 722}
 723
 724static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
 725{
 726	struct device *dev = ks_pcie->pci->dev;
 727	struct irq_domain *intx_irq_domain;
 728	struct device_node *np = ks_pcie->np;
 729	struct device_node *intc_np;
 730	int irq_count, irq, ret = 0, i;
 731
 732	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
 733	if (!intc_np) {
 734		/*
 735		 * Since INTX interrupts are modeled as edge-interrupts in
 736		 * AM6, keep it disabled for now.
 737		 */
 738		if (ks_pcie->is_am6)
 739			return 0;
 740		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
 741		return -EINVAL;
 742	}
 743
 744	irq_count = of_irq_count(intc_np);
 745	if (!irq_count) {
 746		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
 747		ret = -EINVAL;
 748		goto err;
 749	}
 750
 751	for (i = 0; i < irq_count; i++) {
 752		irq = irq_of_parse_and_map(intc_np, i);
 753		if (!irq) {
 754			ret = -EINVAL;
 755			goto err;
 756		}
 757		ks_pcie->intx_host_irqs[i] = irq;
 758
 759		irq_set_chained_handler_and_data(irq,
 760						 ks_pcie_intx_irq_handler,
 761						 ks_pcie);
 762	}
 763
 764	intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
 765					&ks_pcie_intx_irq_domain_ops, NULL);
 766	if (!intx_irq_domain) {
 767		dev_err(dev, "Failed to add irq domain for INTX irqs\n");
 768		ret = -EINVAL;
 769		goto err;
 770	}
 771	ks_pcie->intx_irq_domain = intx_irq_domain;
 772
 773	for (i = 0; i < PCI_NUM_INTX; i++)
 774		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
 775
 776err:
 777	of_node_put(intc_np);
 778	return ret;
 779}
 780
 781#ifdef CONFIG_ARM
 782/*
 783 * When a PCI device does not exist during config cycles, keystone host
 784 * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
 785 * This handler always returns 0 for this kind of fault.
 786 */
 787static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
 788			 struct pt_regs *regs)
 789{
 790	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
 791
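	/*
	 * The mask matches ARM "extra load/store" encodings with the L
	 * bit set (halfword/signed-byte loads such as LDRH); bits
	 * [15:12] select the destination register, which is faked to
	 * all-ones (PCI_ERROR_RESPONSE) before the PC is advanced past
	 * the aborted instruction.
	 */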
 792	if ((instr & 0x0e100090) == 0x00100090) {
 793		int reg = (instr >> 12) & 15;
 794
 795		regs->uregs[reg] = -1;
 796		regs->ARM_pc += 4;
 797	}
 798
 799	return 0;
 800}
 801#endif
 802
 803static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
 804{
 805	int ret;
 806	unsigned int id;
 807	struct regmap *devctrl_regs;
 808	struct dw_pcie *pci = ks_pcie->pci;
 809	struct device *dev = pci->dev;
 810	struct device_node *np = dev->of_node;
 811	struct of_phandle_args args;
 812	unsigned int offset = 0;
 813
 814	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
 815	if (IS_ERR(devctrl_regs))
 816		return PTR_ERR(devctrl_regs);
 817
 818	/* Do not error out to maintain old DT compatibility */
 819	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
 820	if (!ret)
 821		offset = args.args[0];
 822
 823	ret = regmap_read(devctrl_regs, offset, &id);
 824	if (ret)
 825		return ret;
 826
 827	dw_pcie_dbi_ro_wr_en(pci);
 828	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
 829	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
 830	dw_pcie_dbi_ro_wr_dis(pci);
 831
 832	return 0;
 833}
 834
 835static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
 836{
 837	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 838	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 839	int ret;
 840
 841	pp->bridge->ops = &ks_pcie_ops;
 842	if (!ks_pcie->is_am6)
 843		pp->bridge->child_ops = &ks_child_pcie_ops;
 844
 845	ret = ks_pcie_config_intx_irq(ks_pcie);
 846	if (ret)
 847		return ret;
 848
 849	ret = ks_pcie_config_msi_irq(ks_pcie);
 850	if (ret)
 851		return ret;
 852
 853	ks_pcie_stop_link(pci);
 854	ret = ks_pcie_setup_rc_app_regs(ks_pcie);
 855	if (ret)
 856		return ret;
 857
 858	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
 859			pci->dbi_base + PCI_IO_BASE);
 860
 861	ret = ks_pcie_init_id(ks_pcie);
 862	if (ret < 0)
 863		return ret;
 864
 865#ifdef CONFIG_ARM
 866	/*
 867	 * PCIe access errors that result into OCP errors are caught by ARM as
 868	 * "External aborts"
 869	 */
 870	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
 871			"Asynchronous external abort");
 872#endif
 873
 874	return 0;
 875}
 876
 877static const struct dw_pcie_host_ops ks_pcie_host_ops = {
 878	.init = ks_pcie_host_init,
 879	.msi_init = ks_pcie_msi_host_init,
 880};
 881
 882static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
 883	.init = ks_pcie_host_init,
 884};
 885
 886static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
 887{
 888	struct keystone_pcie *ks_pcie = priv;
 889
 890	return ks_pcie_handle_error_irq(ks_pcie);
 891}
 892
 893static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
 894				     u32 reg, size_t size, u32 val)
 895{
 896	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 897
 898	ks_pcie_set_dbi_mode(ks_pcie);
 899	dw_pcie_write(base + reg, size, val);
 900	ks_pcie_clear_dbi_mode(ks_pcie);
 901}
 902
 903static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
 904	.start_link = ks_pcie_start_link,
 905	.stop_link = ks_pcie_stop_link,
 906	.link_up = ks_pcie_link_up,
 907	.write_dbi2 = ks_pcie_am654_write_dbi2,
 908};
 909
 910static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
 911{
 912	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 913	int flags;
 914
 915	ep->page_size = AM654_WIN_SIZE;
 916	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
 917	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
 918	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
 919}
 920
 921static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
 922{
 923	struct dw_pcie *pci = ks_pcie->pci;
 924	u8 int_pin;
 925
 926	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
 927	if (int_pin == 0 || int_pin > 4)
 928		return;
 929
 930	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
 931			   INT_ENABLE);
 932	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
 933	mdelay(1);
 934	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
 935	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
 936			   INT_ENABLE);
 937}
 938
 939static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
 940				   unsigned int type, u16 interrupt_num)
 941{
 942	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 943	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 944
 945	switch (type) {
 946	case PCI_IRQ_INTX:
 947		ks_pcie_am654_raise_intx_irq(ks_pcie);
 948		break;
 949	case PCI_IRQ_MSI:
 950		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
 951		break;
 952	case PCI_IRQ_MSIX:
 953		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
 954		break;
 955	default:
 956		dev_err(pci->dev, "UNKNOWN IRQ type\n");
 957		return -EINVAL;
 958	}
 959
 960	return 0;
 961}
 962
 963static const struct pci_epc_features ks_pcie_am654_epc_features = {
 964	.linkup_notifier = false,
 965	.msi_capable = true,
 966	.msix_capable = true,
 967	.bar[BAR_0] = { .type = BAR_RESERVED, },
 968	.bar[BAR_1] = { .type = BAR_RESERVED, },
 969	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
 970	.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
 971	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
 972	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
 973	.align = SZ_1M,
 974};
 975
 976static const struct pci_epc_features*
 977ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
 978{
 979	return &ks_pcie_am654_epc_features;
 980}
 981
 982static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
 983	.init = ks_pcie_am654_ep_init,
 984	.raise_irq = ks_pcie_am654_raise_irq,
 985	.get_features = &ks_pcie_am654_get_features,
 986};
 987
 988static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
 989{
 990	int num_lanes = ks_pcie->num_lanes;
 991
 992	while (num_lanes--) {
 993		phy_power_off(ks_pcie->phy[num_lanes]);
 994		phy_exit(ks_pcie->phy[num_lanes]);
 995	}
 996}
 997
 998static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
 999{
1000	int i;
1001	int ret;
1002	int num_lanes = ks_pcie->num_lanes;
1003
1004	for (i = 0; i < num_lanes; i++) {
1005		ret = phy_reset(ks_pcie->phy[i]);
1006		if (ret < 0)
1007			goto err_phy;
1008
1009		ret = phy_init(ks_pcie->phy[i]);
1010		if (ret < 0)
1011			goto err_phy;
1012
1013		ret = phy_power_on(ks_pcie->phy[i]);
1014		if (ret < 0) {
1015			phy_exit(ks_pcie->phy[i]);
1016			goto err_phy;
1017		}
1018	}
1019
1020	return 0;
1021
1022err_phy:
1023	while (--i >= 0) {
1024		phy_power_off(ks_pcie->phy[i]);
1025		phy_exit(ks_pcie->phy[i]);
1026	}
1027
1028	return ret;
1029}
1030
1031static int ks_pcie_set_mode(struct device *dev)
1032{
1033	struct device_node *np = dev->of_node;
1034	struct of_phandle_args args;
1035	unsigned int offset = 0;
1036	struct regmap *syscon;
1037	u32 val;
1038	u32 mask;
1039	int ret = 0;
1040
1041	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1042	if (IS_ERR(syscon))
1043		return 0;
1044
1045	/* Do not error out to maintain old DT compatibility */
1046	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1047	if (!ret)
1048		offset = args.args[0];
1049
1050	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
1051	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
1052
1053	ret = regmap_update_bits(syscon, offset, mask, val);
1054	if (ret) {
1055		dev_err(dev, "failed to set pcie mode\n");
1056		return ret;
1057	}
1058
1059	return 0;
1060}
1061
1062static int ks_pcie_am654_set_mode(struct device *dev,
1063				  enum dw_pcie_device_mode mode)
1064{
1065	struct device_node *np = dev->of_node;
1066	struct of_phandle_args args;
1067	unsigned int offset = 0;
1068	struct regmap *syscon;
1069	u32 val;
1070	u32 mask;
1071	int ret = 0;
1072
1073	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1074	if (IS_ERR(syscon))
1075		return 0;
1076
1077	/* Do not error out to maintain old DT compatibility */
1078	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1079	if (!ret)
1080		offset = args.args[0];
1081
1082	mask = AM654_PCIE_DEV_TYPE_MASK;
1083
1084	switch (mode) {
1085	case DW_PCIE_RC_TYPE:
1086		val = RC;
1087		break;
1088	case DW_PCIE_EP_TYPE:
1089		val = EP;
1090		break;
1091	default:
1092		dev_err(dev, "INVALID device type %d\n", mode);
1093		return -EINVAL;
1094	}
1095
1096	ret = regmap_update_bits(syscon, offset, mask, val);
1097	if (ret) {
1098		dev_err(dev, "failed to set pcie mode\n");
1099		return ret;
1100	}
1101
1102	return 0;
1103}
1104
1105static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
1106	.host_ops = &ks_pcie_host_ops,
1107	.mode = DW_PCIE_RC_TYPE,
1108	.version = DW_PCIE_VER_365A,
1109};
1110
1111static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
1112	.host_ops = &ks_pcie_am654_host_ops,
1113	.mode = DW_PCIE_RC_TYPE,
1114	.version = DW_PCIE_VER_490A,
1115};
1116
1117static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
1118	.ep_ops = &ks_pcie_am654_ep_ops,
1119	.mode = DW_PCIE_EP_TYPE,
1120	.version = DW_PCIE_VER_490A,
1121};
1122
1123static const struct of_device_id ks_pcie_of_match[] = {
1124	{
1125		.type = "pci",
1126		.data = &ks_pcie_rc_of_data,
1127		.compatible = "ti,keystone-pcie",
1128	},
1129	{
1130		.data = &ks_pcie_am654_rc_of_data,
1131		.compatible = "ti,am654-pcie-rc",
1132	},
1133	{
1134		.data = &ks_pcie_am654_ep_of_data,
1135		.compatible = "ti,am654-pcie-ep",
1136	},
1137	{ },
1138};
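/*
 * Illustrative sketch of the DT properties this driver consumes; the
 * addresses and values below are placeholders, not from a real board
 * file:
 *
 *	pcie@21800000 {
 *		compatible = "ti,keystone-pcie";
 *		reg = <0x21800000 0x1000>, <0x21801000 0x2000>;
 *		reg-names = "app", "dbics";
 *		num-lanes = <2>;
 *		num-viewport = <32>;
 *		ti,syscon-pcie-mode = <&devctrl 0x14>;
 *		ti,syscon-pcie-id = <&devctrl 0x120>;
 *		phys = <&serdes0>, <&serdes1>;
 *		phy-names = "pcie-phy0", "pcie-phy1";
 *		reset-gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
 *
 *		msi-interrupt-controller { ... };
 *		legacy-interrupt-controller { ... };
 *	};
 */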
1139
1140static int ks_pcie_probe(struct platform_device *pdev)
1141{
1142	const struct dw_pcie_host_ops *host_ops;
1143	const struct dw_pcie_ep_ops *ep_ops;
1144	struct device *dev = &pdev->dev;
1145	struct device_node *np = dev->of_node;
1146	const struct ks_pcie_of_data *data;
1147	enum dw_pcie_device_mode mode;
1148	struct dw_pcie *pci;
1149	struct keystone_pcie *ks_pcie;
1150	struct device_link **link;
1151	struct gpio_desc *gpiod;
1152	struct resource *res;
1153	void __iomem *base;
1154	u32 num_viewport;
1155	struct phy **phy;
1156	u32 num_lanes;
1157	char name[10];
1158	u32 version;
1159	int ret;
1160	int irq;
1161	int i;
1162
1163	data = of_device_get_match_data(dev);
1164	if (!data)
1165		return -EINVAL;
1166
1167	version = data->version;
1168	host_ops = data->host_ops;
1169	ep_ops = data->ep_ops;
1170	mode = data->mode;
1171
1172	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
1173	if (!ks_pcie)
1174		return -ENOMEM;
1175
1176	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1177	if (!pci)
1178		return -ENOMEM;
1179
1180	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
1181	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
1182	if (IS_ERR(ks_pcie->va_app_base))
1183		return PTR_ERR(ks_pcie->va_app_base);
1184
1185	ks_pcie->app = *res;
1186
1187	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
1188	base = devm_pci_remap_cfg_resource(dev, res);
1189	if (IS_ERR(base))
1190		return PTR_ERR(base);
1191
1192	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
1193		ks_pcie->is_am6 = true;
1194
1195	pci->dbi_base = base;
1196	pci->dbi_base2 = base;
1197	pci->dev = dev;
1198	pci->ops = &ks_pcie_dw_pcie_ops;
1199	pci->version = version;
1200
1201	irq = platform_get_irq(pdev, 0);
1202	if (irq < 0)
1203		return irq;
1204
1205	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
1206			  "ks-pcie-error-irq", ks_pcie);
1207	if (ret < 0) {
1208		dev_err(dev, "failed to request error IRQ %d\n",
1209			irq);
1210		return ret;
1211	}
1212
1213	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
1214	if (ret)
1215		num_lanes = 1;
1216
1217	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
1218	if (!phy)
1219		return -ENOMEM;
1220
1221	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
1222	if (!link)
1223		return -ENOMEM;
1224
1225	for (i = 0; i < num_lanes; i++) {
1226		snprintf(name, sizeof(name), "pcie-phy%d", i);
1227		phy[i] = devm_phy_optional_get(dev, name);
1228		if (IS_ERR(phy[i])) {
1229			ret = PTR_ERR(phy[i]);
1230			goto err_link;
1231		}
1232
1233		if (!phy[i])
1234			continue;
1235
1236		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
1237		if (!link[i]) {
1238			ret = -EINVAL;
1239			goto err_link;
1240		}
1241	}
1242
1243	ks_pcie->np = np;
1244	ks_pcie->pci = pci;
1245	ks_pcie->link = link;
1246	ks_pcie->num_lanes = num_lanes;
1247	ks_pcie->phy = phy;
1248
1249	gpiod = devm_gpiod_get_optional(dev, "reset",
1250					GPIOD_OUT_LOW);
1251	if (IS_ERR(gpiod)) {
1252		ret = PTR_ERR(gpiod);
1253		if (ret != -EPROBE_DEFER)
1254			dev_err(dev, "Failed to get reset GPIO\n");
1255		goto err_link;
1256	}
1257
1258	/* Obtain references to the PHYs */
1259	for (i = 0; i < num_lanes; i++)
1260		phy_pm_runtime_get_sync(ks_pcie->phy[i]);
1261
1262	ret = ks_pcie_enable_phy(ks_pcie);
1263
1264	/* Release references to the PHYs */
1265	for (i = 0; i < num_lanes; i++)
1266		phy_pm_runtime_put_sync(ks_pcie->phy[i]);
1267
1268	if (ret) {
1269		dev_err(dev, "failed to enable phy\n");
1270		goto err_link;
1271	}
1272
1273	platform_set_drvdata(pdev, ks_pcie);
1274	pm_runtime_enable(dev);
1275	ret = pm_runtime_get_sync(dev);
1276	if (ret < 0) {
1277		dev_err(dev, "pm_runtime_get_sync failed\n");
1278		goto err_get_sync;
1279	}
1280
1281	if (dw_pcie_ver_is_ge(pci, 480A))
1282		ret = ks_pcie_am654_set_mode(dev, mode);
1283	else
1284		ret = ks_pcie_set_mode(dev);
1285	if (ret < 0)
1286		goto err_get_sync;
1287
1288	switch (mode) {
1289	case DW_PCIE_RC_TYPE:
1290		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
1291			ret = -ENODEV;
1292			goto err_get_sync;
1293		}
1294
1295		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
1296		if (ret < 0) {
1297			dev_err(dev, "unable to read *num-viewport* property\n");
1298			goto err_get_sync;
1299		}
1300
1301		/*
1302		 * "Power Sequencing and Reset Signal Timings" table in
1303		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
1304		 * indicates PERST# should be deasserted after minimum of 100us
1305		 * once REFCLK is stable. The REFCLK to the connector in RC
1306		 * mode is selected while enabling the PHY. So deassert PERST#
1307		 * after 100 us.
1308		 */
1309		if (gpiod) {
1310			usleep_range(100, 200);
1311			gpiod_set_value_cansleep(gpiod, 1);
1312		}
1313
1314		ks_pcie->num_viewport = num_viewport;
1315		pci->pp.ops = host_ops;
1316		ret = dw_pcie_host_init(&pci->pp);
1317		if (ret < 0)
1318			goto err_get_sync;
1319		break;
1320	case DW_PCIE_EP_TYPE:
1321		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
1322			ret = -ENODEV;
1323			goto err_get_sync;
1324		}
1325
1326		pci->ep.ops = ep_ops;
1327		ret = dw_pcie_ep_init(&pci->ep);
1328		if (ret < 0)
1329			goto err_get_sync;
1330
1331		ret = dw_pcie_ep_init_registers(&pci->ep);
1332		if (ret) {
1333			dev_err(dev, "Failed to initialize DWC endpoint registers\n");
1334			goto err_ep_init;
1335		}
1336
1337		pci_epc_init_notify(pci->ep.epc);
1338
1339		break;
1340	default:
1341		dev_err(dev, "INVALID device type %d\n", mode);
1342	}
1343
1344	ks_pcie_enable_error_irq(ks_pcie);
1345
1346	return 0;
1347
1348err_ep_init:
1349	dw_pcie_ep_deinit(&pci->ep);
1350err_get_sync:
1351	pm_runtime_put(dev);
1352	pm_runtime_disable(dev);
1353	ks_pcie_disable_phy(ks_pcie);
1354
1355err_link:
1356	while (--i >= 0 && link[i])
1357		device_link_del(link[i]);
1358
1359	return ret;
1360}
1361
1362static void ks_pcie_remove(struct platform_device *pdev)
1363{
1364	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
1365	struct device_link **link = ks_pcie->link;
1366	int num_lanes = ks_pcie->num_lanes;
1367	struct device *dev = &pdev->dev;
1368
1369	pm_runtime_put(dev);
1370	pm_runtime_disable(dev);
1371	ks_pcie_disable_phy(ks_pcie);
1372	while (num_lanes--)
1373		device_link_del(link[num_lanes]);
1374}
1375
1376static struct platform_driver ks_pcie_driver = {
1377	.probe  = ks_pcie_probe,
1378	.remove = ks_pcie_remove,
1379	.driver = {
1380		.name	= "keystone-pcie",
1381		.of_match_table = ks_pcie_of_match,
1382	},
1383};
1384builtin_platform_driver(ks_pcie_driver);
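/*
 * Built-in only (no module support); RC and EP roles are additionally
 * gated by CONFIG_PCI_KEYSTONE_HOST and CONFIG_PCI_KEYSTONE_EP, which
 * the probe above checks before initializing the respective mode.
 */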
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe host controller driver for Texas Instruments Keystone SoCs
   4 *
   5 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
   6 *		http://www.ti.com
   7 *
   8 * Author: Murali Karicheri <m-karicheri2@ti.com>
   9 * Implementation based on pci-exynos.c and pcie-designware.c
  10 */
  11
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/gpio/consumer.h>
  15#include <linux/init.h>
  16#include <linux/interrupt.h>
  17#include <linux/irqchip/chained_irq.h>
  18#include <linux/irqdomain.h>
  19#include <linux/mfd/syscon.h>
  20#include <linux/msi.h>
  21#include <linux/of.h>
  22#include <linux/of_device.h>
  23#include <linux/of_irq.h>
  24#include <linux/of_pci.h>
  25#include <linux/phy/phy.h>
  26#include <linux/platform_device.h>
  27#include <linux/regmap.h>
  28#include <linux/resource.h>
  29#include <linux/signal.h>
  30
  31#include "../../pci.h"
  32#include "pcie-designware.h"
  33
  34#define PCIE_VENDORID_MASK	0xffff
  35#define PCIE_DEVICEID_SHIFT	16
  36
  37/* Application registers */
 
 
 
 
 
  38#define CMD_STATUS			0x004
  39#define LTSSM_EN_VAL		        BIT(0)
  40#define OB_XLAT_EN_VAL		        BIT(1)
  41#define DBI_CS2				BIT(5)
  42
  43#define CFG_SETUP			0x008
  44#define CFG_BUS(x)			(((x) & 0xff) << 16)
  45#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
  46#define CFG_FUNC(x)			((x) & 0x7)
  47#define CFG_TYPE1			BIT(24)
  48
  49#define OB_SIZE				0x030
  50#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
  51#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
  52#define OB_ENABLEN			BIT(0)
  53#define OB_WIN_SIZE			8	/* 8MB */
  54
  55#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
  56#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
  57#define PCIE_EP_IRQ_SET			0x64
  58#define PCIE_EP_IRQ_CLR			0x68
  59#define INT_ENABLE			BIT(0)
  60
  61/* IRQ register defines */
  62#define IRQ_EOI				0x050
  63
  64#define MSI_IRQ				0x054
  65#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
  66#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
  67#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
  68#define MSI_IRQ_OFFSET			4
  69
  70#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
  71#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
  72#define INTx_EN				BIT(0)
  73
  74#define ERR_IRQ_STATUS			0x1c4
  75#define ERR_IRQ_ENABLE_SET		0x1c8
  76#define ERR_AER				BIT(5)	/* ECRC error */
  77#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
  78#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
  79#define ERR_CORR			BIT(3)	/* Correctable error */
  80#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
  81#define ERR_FATAL			BIT(1)	/* Fatal error */
  82#define ERR_SYS				BIT(0)	/* System error */
  83#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
  84					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
  85
  86/* PCIE controller device IDs */
  87#define PCIE_RC_K2HK			0xb008
  88#define PCIE_RC_K2E			0xb009
  89#define PCIE_RC_K2L			0xb00a
  90#define PCIE_RC_K2G			0xb00b
  91
  92#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
  93#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)
  94
  95#define EP				0x0
  96#define LEG_EP				0x1
  97#define RC				0x2
  98
  99#define EXP_CAP_ID_OFFSET		0x70
 100
 101#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)
 102
 103#define AM654_PCIE_DEV_TYPE_MASK	0x3
 104#define AM654_WIN_SIZE			SZ_64K
 105
 106#define APP_ADDR_SPACE_0		(16 * SZ_1K)
 107
 108#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
 109
 
 
 110struct ks_pcie_of_data {
 111	enum dw_pcie_device_mode mode;
 112	const struct dw_pcie_host_ops *host_ops;
 113	const struct dw_pcie_ep_ops *ep_ops;
 114	unsigned int version;
 115};
 116
 117struct keystone_pcie {
 118	struct dw_pcie		*pci;
 119	/* PCI Device ID */
 120	u32			device_id;
 121	int			legacy_host_irqs[PCI_NUM_INTX];
 122	struct			device_node *legacy_intc_np;
 123
 124	int			msi_host_irq;
 125	int			num_lanes;
 126	u32			num_viewport;
 127	struct phy		**phy;
 128	struct device_link	**link;
 129	struct			device_node *msi_intc_np;
 130	struct irq_domain	*legacy_irq_domain;
 131	struct device_node	*np;
 132
 133	/* Application register space */
 134	void __iomem		*va_app_base;	/* DT 1st resource */
 135	struct resource		app;
 136	bool			is_am6;
 137};
 138
 139static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
 140{
 141	return readl(ks_pcie->va_app_base + offset);
 142}
 143
 144static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
 145			       u32 val)
 146{
 147	writel(val, ks_pcie->va_app_base + offset);
 148}
 149
 150static void ks_pcie_msi_irq_ack(struct irq_data *data)
 151{
 152	struct pcie_port *pp  = irq_data_get_irq_chip_data(data);
 153	struct keystone_pcie *ks_pcie;
 154	u32 irq = data->hwirq;
 155	struct dw_pcie *pci;
 156	u32 reg_offset;
 157	u32 bit_pos;
 158
 159	pci = to_dw_pcie_from_pp(pp);
 160	ks_pcie = to_keystone_pcie(pci);
 161
 162	reg_offset = irq % 8;
 163	bit_pos = irq >> 3;
 164
 165	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
 166			   BIT(bit_pos));
 167	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
 168}
 169
 170static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 171{
 172	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 173	struct keystone_pcie *ks_pcie;
 174	struct dw_pcie *pci;
 175	u64 msi_target;
 176
 177	pci = to_dw_pcie_from_pp(pp);
 178	ks_pcie = to_keystone_pcie(pci);
 179
 180	msi_target = ks_pcie->app.start + MSI_IRQ;
 181	msg->address_lo = lower_32_bits(msi_target);
 182	msg->address_hi = upper_32_bits(msi_target);
 183	msg->data = data->hwirq;
 184
 185	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
 186		(int)data->hwirq, msg->address_hi, msg->address_lo);
 187}
 188
 189static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
 190				    const struct cpumask *mask, bool force)
 191{
 192	return -EINVAL;
 193}
 194
 195static void ks_pcie_msi_mask(struct irq_data *data)
 196{
 197	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 198	struct keystone_pcie *ks_pcie;
 199	u32 irq = data->hwirq;
 200	struct dw_pcie *pci;
 201	unsigned long flags;
 202	u32 reg_offset;
 203	u32 bit_pos;
 204
 205	raw_spin_lock_irqsave(&pp->lock, flags);
 206
 207	pci = to_dw_pcie_from_pp(pp);
 208	ks_pcie = to_keystone_pcie(pci);
 209
 210	reg_offset = irq % 8;
 211	bit_pos = irq >> 3;
 212
 213	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
 214			   BIT(bit_pos));
 215
 216	raw_spin_unlock_irqrestore(&pp->lock, flags);
 217}
 218
 219static void ks_pcie_msi_unmask(struct irq_data *data)
 220{
 221	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 222	struct keystone_pcie *ks_pcie;
 223	u32 irq = data->hwirq;
 224	struct dw_pcie *pci;
 225	unsigned long flags;
 226	u32 reg_offset;
 227	u32 bit_pos;
 228
 229	raw_spin_lock_irqsave(&pp->lock, flags);
 230
 231	pci = to_dw_pcie_from_pp(pp);
 232	ks_pcie = to_keystone_pcie(pci);
 233
 234	reg_offset = irq % 8;
 235	bit_pos = irq >> 3;
 236
 237	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
 238			   BIT(bit_pos));
 239
 240	raw_spin_unlock_irqrestore(&pp->lock, flags);
 241}
 242
 243static struct irq_chip ks_pcie_msi_irq_chip = {
 244	.name = "KEYSTONE-PCI-MSI",
 245	.irq_ack = ks_pcie_msi_irq_ack,
 246	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
 247	.irq_set_affinity = ks_pcie_msi_set_affinity,
 248	.irq_mask = ks_pcie_msi_mask,
 249	.irq_unmask = ks_pcie_msi_unmask,
 250};
 251
 252static int ks_pcie_msi_host_init(struct pcie_port *pp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 253{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 254	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
 255	return dw_pcie_allocate_domains(pp);
 256}
 257
 258static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
 259				      int offset)
 260{
 261	struct dw_pcie *pci = ks_pcie->pci;
 262	struct device *dev = pci->dev;
 263	u32 pending;
 264	int virq;
 265
 266	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
 267
 268	if (BIT(0) & pending) {
 269		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
 270		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
 271		generic_handle_irq(virq);
 272	}
 273
 274	/* EOI the INTx interrupt */
 275	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
 276}
 277
 278/*
 279 * Dummy function so that DW core doesn't configure MSI
 280 */
 281static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
 282{
 283	return 0;
 284}
 285
 286static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 287{
 288	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
 289}
 290
 291static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 292{
 293	u32 reg;
 294	struct device *dev = ks_pcie->pci->dev;
 295
 296	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
 297	if (!reg)
 298		return IRQ_NONE;
 299
 300	if (reg & ERR_SYS)
 301		dev_err(dev, "System Error\n");
 302
 303	if (reg & ERR_FATAL)
 304		dev_err(dev, "Fatal Error\n");
 305
 306	if (reg & ERR_NONFATAL)
 307		dev_dbg(dev, "Non Fatal Error\n");
 308
 309	if (reg & ERR_CORR)
 310		dev_dbg(dev, "Correctable Error\n");
 311
 312	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
 313		dev_err(dev, "AXI tag lookup fatal Error\n");
 314
 315	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
 316		dev_err(dev, "ECRC Error\n");
 317
 318	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
 319
 320	return IRQ_HANDLED;
 321}
 322
 323static void ks_pcie_ack_legacy_irq(struct irq_data *d)
 324{
 325}
 326
 327static void ks_pcie_mask_legacy_irq(struct irq_data *d)
 328{
 329}
 330
 331static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
 332{
 333}
 334
 335static struct irq_chip ks_pcie_legacy_irq_chip = {
 336	.name = "Keystone-PCI-Legacy-IRQ",
 337	.irq_ack = ks_pcie_ack_legacy_irq,
 338	.irq_mask = ks_pcie_mask_legacy_irq,
 339	.irq_unmask = ks_pcie_unmask_legacy_irq,
 340};
 341
 342static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
 343				       unsigned int irq,
 344				       irq_hw_number_t hw_irq)
 345{
 346	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
 347				 handle_level_irq);
 348	irq_set_chip_data(irq, d->host_data);
 349
 350	return 0;
 351}
 352
 353static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
 354	.map = ks_pcie_init_legacy_irq_map,
 355	.xlate = irq_domain_xlate_onetwocell,
 356};
 357
 358/**
 359 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 360 * registers
 361 *
 362 * Since modification of dbi_cs2 involves different clock domain, read the
 363 * status back to ensure the transition is complete.
 364 */
 365static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
 366{
 367	u32 val;
 368
 369	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 370	val |= DBI_CS2;
 371	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 372
 373	do {
 374		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 375	} while (!(val & DBI_CS2));
 376}
 377
 378/**
 379 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 380 *
 381 * Since modification of dbi_cs2 involves different clock domain, read the
 382 * status back to ensure the transition is complete.
 383 */
 384static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 385{
 386	u32 val;
 387
 388	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 389	val &= ~DBI_CS2;
 390	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 391
 392	do {
 393		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 394	} while (val & DBI_CS2);
 395}
 396
 397static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 398{
 399	u32 val;
 400	u32 num_viewport = ks_pcie->num_viewport;
 401	struct dw_pcie *pci = ks_pcie->pci;
 402	struct pcie_port *pp = &pci->pp;
 403	u64 start = pp->mem->start;
 404	u64 end = pp->mem->end;
 
 405	int i;
 406
 
 
 
 
 
 
 
 
 407	/* Disable BARs for inbound access */
 408	ks_pcie_set_dbi_mode(ks_pcie);
 409	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
 410	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
 411	ks_pcie_clear_dbi_mode(ks_pcie);
 412
 413	if (ks_pcie->is_am6)
 414		return;
 415
 416	val = ilog2(OB_WIN_SIZE);
 417	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
 418
 419	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
 420	for (i = 0; i < num_viewport && (start < end); i++) {
 421		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
 422				   lower_32_bits(start) | OB_ENABLEN);
 423		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
 424				   upper_32_bits(start));
 425		start += OB_WIN_SIZE;
 426	}
 427
 428	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 429	val |= OB_XLAT_EN_VAL;
 430	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 
 
 431}
 432
 433static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 434				 unsigned int devfn, int where, int size,
 435				 u32 *val)
 436{
 
 437	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 438	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 439	u32 reg;
 440
 441	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
 442		CFG_FUNC(PCI_FUNC(devfn));
 443	if (bus->parent->number != pp->root_bus_nr)
 444		reg |= CFG_TYPE1;
 445	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
 446
 447	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
 448}
 449
 450static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 451				 unsigned int devfn, int where, int size,
 452				 u32 val)
 453{
 454	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 455	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 456	u32 reg;
 457
 458	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
 459		CFG_FUNC(PCI_FUNC(devfn));
 460	if (bus->parent->number != pp->root_bus_nr)
 461		reg |= CFG_TYPE1;
 462	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
 463
 464	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
 465}
 466
 467/**
 468 * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 469 *
 470 * This sets BAR0 to enable inbound access for MSI_IRQ register
 471 */
 472static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
 473{
 474	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 475	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 476
 477	/* Configure and set up BAR0 */
 478	ks_pcie_set_dbi_mode(ks_pcie);
 479
 480	/* Enable BAR0 */
 481	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
 482	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
 483
 484	ks_pcie_clear_dbi_mode(ks_pcie);
 485
 486	 /*
 487	  * For BAR0, just setting bus address for inbound writes (MSI) should
 488	  * be sufficient.  Use physical address to avoid any conflicts.
 489	  */
 490	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
 491}
 492
 493/**
 494 * ks_pcie_link_up() - Check if link up
 
 
 495 */
 496static int ks_pcie_link_up(struct dw_pcie *pci)
 497{
 498	u32 val;
 499
 500	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
 501	val &= PORT_LOGIC_LTSSM_STATE_MASK;
 502	return (val == PORT_LOGIC_LTSSM_STATE_L0);
 503}
 504
 505static void ks_pcie_stop_link(struct dw_pcie *pci)
 506{
 507	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 508	u32 val;
 509
 510	/* Disable Link training */
 511	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 512	val &= ~LTSSM_EN_VAL;
 513	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 514}
 515
 516static int ks_pcie_start_link(struct dw_pcie *pci)
 517{
 518	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 519	struct device *dev = pci->dev;
 520	u32 val;
 521
 522	if (dw_pcie_link_up(pci)) {
 523		dev_dbg(dev, "link is already up\n");
 524		return 0;
 525	}
 526
 527	/* Initiate Link Training */
 528	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 529	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 530
 531	return 0;
 532}

static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge = NULL;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * The Keystone PCI controller has a h/w limitation of
	 * 256 bytes maximum read request size.  It can't handle
	 * anything higher than this.  So force this limit on
	 * all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256\n");
			pcie_set_readrq(dev, 256);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
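
/*
 * The fixup above runs for every PCI device at enable time, but it only
 * takes effect when the host bridge matches one of the Keystone RC
 * device IDs, so devices behind other host controllers in the same
 * kernel are left alone.
 */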

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = desc->irq_data.hwirq;
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irq;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 vector, virq, reg, pos;

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced the
	 * normal interrupt driver handler, so we need to take care of
	 * mask/unmask and ack operations.
	 */
	chained_irq_enter(chip, desc);

	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
	/*
	 * MSI0 status bits 0-3 report vectors 0, 8, 16, 24; MSI1 status
	 * bits report vectors 1, 9, 17, 25; and so forth.
	 */
	for (pos = 0; pos < 4; pos++) {
		if (!(reg & BIT(pos)))
			continue;

		vector = offset + (pos << 3);
		virq = irq_linear_revmap(pp->irq_domain, vector);
		dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
			virq);
		generic_handle_irq(virq);
	}

	chained_irq_exit(chip, desc);
}
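
/*
 * Worked example of the vector decode above: with MSI status registers
 * four bits wide, status bit 2 of MSI_IRQ_STATUS(1) yields vector
 * 1 + (2 << 3) = 17, which is then mapped through the MSI IRQ domain
 * and handled like any other MSI.
 */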

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced the
	 * normal interrupt driver handler, so we need to take care of
	 * mask/unmask and ack operations.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}
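
/*
 * Both IRQ setup helpers expect the host interrupt lines to be listed
 * in a child node of the controller.  A device-tree sketch (node names
 * as used by this driver, exact interrupt cells are SoC-specific):
 *
 *	msi-interrupt-controller {
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>;
 *	};
 *
 * The hwirq of the first line is remembered in msi_host_irq so the
 * chained handler can turn an incoming line back into a register
 * offset.
 */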

static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		/*
		 * Since legacy interrupts are modeled as edge-interrupts in
		 * AM6, keep it disabled for now.
		 */
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

err:
	of_node_put(intc_np);
	return ret;
}

#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, the Keystone
 * host gets a bus error instead of returning 0xffffffff. This handler
 * always returns 0 for such faults.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
#endif
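
/*
 * The decode in ks_pcie_fault() matches load instructions (bit 20 set)
 * in the encoding class selected by the 0x0e100090 mask, pulls the
 * destination register out of bits 15:12, fakes 0xffffffff in it and
 * steps the PC past the faulting instruction, so config reads of absent
 * devices behave as they do on buses that return all-ones natively.
 */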

static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
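
/*
 * The ID word read from the "ti,syscon-pcie-id" syscon packs the vendor
 * ID in its low 16 bits and the device ID in the high 16.  DBI
 * read-only write access has to be unlocked around the two writes
 * because the standard config header is otherwise immutable.
 */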

static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	dw_pcie_setup_rc(pp);

	ks_pcie_stop_link(pci);
	ks_pcie_setup_rc_app_regs(ks_pcie);
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result in OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	ks_pcie_start_link(pci);
	dw_pcie_wait_for_link(pci);

	return 0;
}
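
/*
 * Ordering in ks_pcie_host_init(): interrupts are wired up first, the
 * DesignWare core is programmed, the link is held down while the
 * application outbound registers and IDs are set up, and training is
 * only re-enabled at the end.  The PCI_IO_BASE write advertises 32-bit
 * I/O addressing in the root port's bridge window registers.
 */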

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.rd_other_conf = ks_pcie_rd_other_conf,
	.wr_other_conf = ks_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
	.scan_bus = ks_pcie_v3_65_scan_bus,
};

static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_am654_msi_host_init,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
					struct platform_device *pdev)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pp->va_cfg0_base))
		return PTR_ERR(pp->va_cfg0_base);

	pp->va_cfg1_base = pp->va_cfg0_base;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
				   u32 reg, size_t size)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_read(base + reg, size, &val);
	ks_pcie_clear_dbi_mode(ks_pcie);
	return val;
}

static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				     u32 reg, size_t size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_write(base + reg, size, val);
	ks_pcie_clear_dbi_mode(ks_pcie);
}
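
/*
 * On AM654 the shadow (DBI2) registers are not a separate address
 * range: dbi_base2 aliases dbi_base (see probe below), and the two
 * helpers above reach the shadow copy by toggling DBI_CS2 around an
 * ordinary DBI access.
 */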

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.start_link = ks_pcie_start_link,
	.stop_link = ks_pcie_stop_link,
	.link_up = ks_pcie_link_up,
	.read_dbi2 = ks_pcie_am654_read_dbi2,
	.write_dbi2 = ks_pcie_am654_write_dbi2,
};

static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	int flags;

	ep->page_size = AM654_WIN_SIZE;
	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}

static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	u8 int_pin;

	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
	if (int_pin == 0 || int_pin > 4)
		return;

	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
			   INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
	mdelay(1);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
			   INT_ENABLE);
}
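
/*
 * Legacy interrupts towards the host are raised as a pulse: enable the
 * INTx line for the configured pin, assert EP_IRQ, hold it for 1 ms,
 * then deassert and disable the line again.
 */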

static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		ks_pcie_am654_raise_legacy_irq(ks_pcie);
		break;
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,
	.bar_fixed_size[3] = SZ_64K,
	.bar_fixed_size[4] = 256,
	.bar_fixed_size[5] = SZ_1M,
	.align = SZ_1M,
};
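
/*
 * BAR0 and BAR1 are reserved above because ks_pcie_am654_ep_init()
 * claims BAR0 for the application address space (APP_ADDR_SPACE_0);
 * the remaining BARs advertise fixed sizes, and the 1 MB alignment
 * matches the inbound translation granularity assumed by this driver.
 */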

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
	return &ks_pcie_am654_epc_features;
}

static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
	.ep_init = ks_pcie_am654_ep_init,
	.raise_irq = ks_pcie_am654_raise_irq,
	.get_features = ks_pcie_am654_get_features,
};

static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
				      struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = ks_pcie->pci;

	ep = &pci->ep;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_reset(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}
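
/*
 * Unwind pattern above: if lane i fails, lanes 0..i-1 are powered off
 * and exited in reverse order, while the failing lane has already been
 * cleaned up in place (phy_exit() after a failed phy_power_on()).
 */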

static int ks_pcie_set_mode(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static int ks_pcie_am654_set_mode(struct device *dev,
				  enum dw_pcie_device_mode mode)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = AM654_PCIE_DEV_TYPE_MASK;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		val = RC;
		break;
	case DW_PCIE_EP_TYPE:
		val = EP;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
		return -EINVAL;
	}

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
{
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
		val &= ~((u32)PCI_EXP_LNKCAP_SLS);
		val |= link_speed;
		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
				   val);
	}

	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
	if ((val & PCI_EXP_LNKCTL2_TLS) != link_speed) {
		val &= ~((u32)PCI_EXP_LNKCTL2_TLS);
		val |= link_speed;
		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
				   val);
	}

	dw_pcie_dbi_ro_wr_dis(pci);
}
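
/*
 * link_speed follows the PCI_EXP_LNKCAP_SLS encoding: passing 2 caps
 * both the advertised (LNKCAP) and target (LNKCTL2) speed at 5.0 GT/s,
 * which is also the fallback the probe path uses when the device tree
 * carries no "max-link-speed" property.
 */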

static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
	.host_ops = &ks_pcie_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = 0x365A,
};

static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
	.host_ops = &ks_pcie_am654_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = 0x490A,
};

static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
	.ep_ops = &ks_pcie_am654_ep_ops,
	.mode = DW_PCIE_EP_TYPE,
	.version = 0x490A,
};

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};
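
/*
 * Note the first entry matches on the node's device_type ("pci") in
 * addition to the "ti,keystone-pcie" compatible; the AM654 entries are
 * matched by compatible string alone.
 */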

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	const struct of_device_id *match;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	void __iomem *atu_base;
	struct resource *res;
	unsigned int version;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	int link_speed;
	u32 num_lanes;
	char name[10];
	int ret;
	int irq;
	int i;

	match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
	if (!match || !match->data)
		return -EINVAL;

	data = match->data;
	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->phy = phy;

	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	if (pci->version >= 0x480A) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
		atu_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(atu_base)) {
			ret = PTR_ERR(atu_base);
			goto err_get_sync;
		}

		pci->atu_base = atu_base;

		ret = ks_pcie_am654_set_mode(dev, mode);
		if (ret < 0)
			goto err_get_sync;
	} else {
		ret = ks_pcie_set_mode(dev);
		if (ret < 0)
			goto err_get_sync;
	}

	link_speed = of_pci_get_max_link_speed(np);
	if (link_speed < 0)
		link_speed = 2;

	ks_pcie_set_link_speed(pci, link_speed);

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * "Power Sequencing and Reset Signal Timings" table in
		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
		 * indicates PERST# should be deasserted after minimum of 100us
		 * once REFCLK is stable. The REFCLK to the connector in RC
		 * mode is selected while enabling the PHY. So deassert PERST#
		 * after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}
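
/*
 * Probe error handling is two-stage: err_get_sync undoes runtime PM and
 * powers the PHYs back down, then falls through to err_link, which
 * deletes the per-lane device links in reverse order.
 */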

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--) {
		/* Lanes without a PHY never got a device link */
		if (link[num_lanes])
			device_link_del(link[num_lanes]);
	}

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);