v5.9 (drivers/pci/controller/pcie-rcar-host.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/interrupt.h>
  18#include <linux/irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/kernel.h>
  21#include <linux/init.h>
  22#include <linux/msi.h>
  23#include <linux/of_address.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_pci.h>
  26#include <linux/of_platform.h>
  27#include <linux/pci.h>
  28#include <linux/phy/phy.h>
  29#include <linux/platform_device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/slab.h>
  32
  33#include "pcie-rcar.h"
  34
  35struct rcar_msi {
  36	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  37	struct irq_domain *domain;
  38	struct msi_controller chip;
  39	unsigned long pages;
  40	struct mutex lock;
  41	int irq1;
  42	int irq2;
  43};
  44
  45static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
  46{
  47	return container_of(chip, struct rcar_msi, chip);
  48}
  49
  50/* Structure representing the PCIe interface */
  51struct rcar_pcie_host {
  52	struct rcar_pcie	pcie;
  53	struct device		*dev;
  54	struct phy		*phy;
  55	void __iomem		*base;
  56	struct clk		*bus_clk;
  57	struct			rcar_msi msi;
  58	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  59};
  60
  61static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
  62{
  63	unsigned int shift = BITS_PER_BYTE * (where & 3);
  64	u32 val = rcar_pci_read_reg(pcie, where & ~3);
  65
  66	return val >> shift;
  67}
  68
  69/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
  70static int rcar_pcie_config_access(struct rcar_pcie_host *host,
  71		unsigned char access_type, struct pci_bus *bus,
  72		unsigned int devfn, int where, u32 *data)
  73{
  74	struct rcar_pcie *pcie = &host->pcie;
  75	unsigned int dev, func, reg, index;
  76
  77	dev = PCI_SLOT(devfn);
  78	func = PCI_FUNC(devfn);
  79	reg = where & ~3;
  80	index = reg / 4;
  81
  82	/*
  83	 * While each channel has its own memory-mapped extended config
  84	 * space, it's generally only accessible when in endpoint mode.
  85	 * When in root complex mode, the controller is unable to target
  86	 * itself with either type 0 or type 1 accesses, and indeed, any
   87	 * controller-initiated target transfer to its own config space
   88	 * results in a completer abort.
  89	 *
  90	 * Each channel effectively only supports a single device, but as
  91	 * the same channel <-> device access works for any PCI_SLOT()
  92	 * value, we cheat a bit here and bind the controller's config
  93	 * space to devfn 0 in order to enable self-enumeration. In this
  94	 * case the regular ECAR/ECDR path is sidelined and the mangled
  95	 * config access itself is initiated as an internal bus transaction.
  96	 */
  97	if (pci_is_root_bus(bus)) {
  98		if (dev != 0)
  99			return PCIBIOS_DEVICE_NOT_FOUND;
 100
 101		if (access_type == RCAR_PCI_ACCESS_READ)
 102			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 103		else
 104			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 105
 106		return PCIBIOS_SUCCESSFUL;
 107	}
 108
 109	/* Clear errors */
 110	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 111
 112	/* Set the PIO address */
 113	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 114		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 115
 116	/* Enable the configuration access */
 117	if (pci_is_root_bus(bus->parent))
 118		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 119	else
 120		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 121
 122	/* Check for errors */
 123	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 124		return PCIBIOS_DEVICE_NOT_FOUND;
 125
 126	/* Check for master and target aborts */
 127	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 128		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 129		return PCIBIOS_DEVICE_NOT_FOUND;
 130
 131	if (access_type == RCAR_PCI_ACCESS_READ)
 132		*data = rcar_pci_read_reg(pcie, PCIECDR);
 133	else
 134		rcar_pci_write_reg(pcie, *data, PCIECDR);
 135
 136	/* Disable the configuration access */
 137	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 138
 139	return PCIBIOS_SUCCESSFUL;
 140}
 141
 142static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 143			       int where, int size, u32 *val)
 144{
 145	struct rcar_pcie_host *host = bus->sysdata;
 146	int ret;
 147
 148	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 149				      bus, devfn, where, val);
 150	if (ret != PCIBIOS_SUCCESSFUL) {
 151		*val = 0xffffffff;
 152		return ret;
 153	}
 154
 155	if (size == 1)
 156		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 157	else if (size == 2)
 158		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 159
 160	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 161		bus->number, devfn, where, size, *val);
 162
 163	return ret;
 164}
 165
 166/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 167static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 168				int where, int size, u32 val)
 169{
 170	struct rcar_pcie_host *host = bus->sysdata;
 171	unsigned int shift;
 172	u32 data;
 173	int ret;
 174
 175	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 176				      bus, devfn, where, &data);
 177	if (ret != PCIBIOS_SUCCESSFUL)
 178		return ret;
 179
 180	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 181		bus->number, devfn, where, size, val);
 182
 183	if (size == 1) {
 184		shift = BITS_PER_BYTE * (where & 3);
 185		data &= ~(0xff << shift);
 186		data |= ((val & 0xff) << shift);
 187	} else if (size == 2) {
 188		shift = BITS_PER_BYTE * (where & 2);
 189		data &= ~(0xffff << shift);
 190		data |= ((val & 0xffff) << shift);
 191	} else
 192		data = val;
 193
 194	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 195				      bus, devfn, where, &data);
 196
 197	return ret;
 198}
 199
 200static struct pci_ops rcar_pcie_ops = {
 201	.read	= rcar_pcie_read_conf,
 202	.write	= rcar_pcie_write_conf,
 203};
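
The accessors above emulate 1- and 2-byte config reads and writes on top of the controller's 32-bit-only data path by shifting and masking within the aligned dword. A minimal standalone C sketch of that read-modify-write arithmetic (illustrative only; merge_conf_write() is a hypothetical helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Merge a 1-, 2- or 4-byte config write into its aligned 32-bit dword,
 * mirroring the shift/mask arithmetic in rcar_pcie_write_conf() above. */
static uint32_t merge_conf_write(uint32_t dword, int where, int size, uint32_t val)
{
	unsigned int shift;

	if (size == 1) {
		shift = 8 * (where & 3);
		dword &= ~(0xffu << shift);
		dword |= (val & 0xff) << shift;
	} else if (size == 2) {
		shift = 8 * (where & 2);
		dword &= ~(0xffffu << shift);
		dword |= (val & 0xffff) << shift;
	} else {
		dword = val;
	}
	return dword;
}

int main(void)
{
	/* Write 0xAB to config offset 0x3e, i.e. byte 2 of the dword at 0x3c. */
	printf("0x%08x\n",
	       (unsigned int)merge_conf_write(0x00112233, 0x3e, 1, 0xAB)); /* 0x00ab2233 */
	return 0;
}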
 204
 205static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 206{
 207	struct device *dev = pcie->dev;
 208	unsigned int timeout = 1000;
 209	u32 macsr;
 210
 211	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 212		return;
 213
 214	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 215		dev_err(dev, "Speed change already in progress\n");
 216		return;
 217	}
 218
 219	macsr = rcar_pci_read_reg(pcie, MACSR);
 220	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 221		goto done;
 222
 223	/* Set target link speed to 5.0 GT/s */
 224	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 225		   PCI_EXP_LNKSTA_CLS_5_0GB);
 226
 227	/* Set speed change reason as intentional factor */
 228	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 229
 230	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 231	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 232		rcar_pci_write_reg(pcie, macsr, MACSR);
 233
 234	/* Start link speed change */
 235	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 236
 237	while (timeout--) {
 238		macsr = rcar_pci_read_reg(pcie, MACSR);
 239		if (macsr & SPCHGFIN) {
 240			/* Clear the interrupt bits */
 241			rcar_pci_write_reg(pcie, macsr, MACSR);
 242
 243			if (macsr & SPCHGFAIL)
 244				dev_err(dev, "Speed change failed\n");
 245
 246			goto done;
 247		}
 248
 249		msleep(1);
 250	}
 251
 252	dev_err(dev, "Speed change timed out\n");
 253
 254done:
 255	dev_info(dev, "Current link speed is %s GT/s\n",
 256		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 257}
 258
 259static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 260{
 261	struct rcar_pcie *pcie = &host->pcie;
 262	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 263	struct resource_entry *win;
 264	LIST_HEAD(res);
 265	int i = 0;
 266
 267	/* Try setting 5 GT/s link speed */
 268	rcar_pcie_force_speedup(pcie);
 269
 270	/* Setup PCI resources */
 271	resource_list_for_each_entry(win, &bridge->windows) {
 272		struct resource *res = win->res;
 273
 274		if (!res->flags)
 275			continue;
 276
 277		switch (resource_type(res)) {
 278		case IORESOURCE_IO:
 279		case IORESOURCE_MEM:
 280			rcar_pcie_set_outbound(pcie, i, win);
 281			i++;
 282			break;
 283		}
 284	}
 285}
 286
 287static int rcar_pcie_enable(struct rcar_pcie_host *host)
 288{
 289	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 290
 291	rcar_pcie_hw_enable(host);
 292
 293	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 294
 295	bridge->sysdata = host;
 296	bridge->ops = &rcar_pcie_ops;
 297	if (IS_ENABLED(CONFIG_PCI_MSI))
 298		bridge->msi = &host->msi.chip;
 299
 300	return pci_host_probe(bridge);
 301}
 302
 303static int phy_wait_for_ack(struct rcar_pcie *pcie)
 304{
 305	struct device *dev = pcie->dev;
 306	unsigned int timeout = 100;
 307
 308	while (timeout--) {
 309		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 310			return 0;
 311
 312		udelay(100);
 313	}
 314
 315	dev_err(dev, "Access to PCIe phy timed out\n");
 316
 317	return -ETIMEDOUT;
 318}
 319
 320static void phy_write_reg(struct rcar_pcie *pcie,
 321			  unsigned int rate, u32 addr,
 322			  unsigned int lane, u32 data)
 323{
 324	u32 phyaddr;
 325
 326	phyaddr = WRITE_CMD |
 327		((rate & 1) << RATE_POS) |
 328		((lane & 0xf) << LANE_POS) |
 329		((addr & 0xff) << ADR_POS);
 330
 331	/* Set write data */
 332	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 333	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 334
 335	/* Ignore errors as they will be dealt with if the data link is down */
 336	phy_wait_for_ack(pcie);
 337
 338	/* Clear command */
 339	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 340	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 341
 342	/* Ignore errors as they will be dealt with if the data link is down */
 343	phy_wait_for_ack(pcie);
 344}
 345
 346static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 347{
 348	int err;
 349
 350	/* Begin initialization */
 351	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 352
 353	/* Set mode */
 354	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 355
 356	err = rcar_pcie_wait_for_phyrdy(pcie);
 357	if (err)
 358		return err;
 359
 360	/*
 361	 * Initial header for port config space is type 1, set the device
 362	 * class to match. Hardware takes care of propagating the IDSETR
 363	 * settings, so there is no need to bother with a quirk.
 364	 */
 365	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 366
 367	/*
 368	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
  369	 * they aren't used, to avoid the bridge being detected as broken.
 370	 */
 371	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 372	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 373
 374	/* Initialize default capabilities. */
 375	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 376	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 377		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 378	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 379		PCI_HEADER_TYPE_BRIDGE);
 380
 381	/* Enable data link layer active state reporting */
 382	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 383		PCI_EXP_LNKCAP_DLLLARC);
 384
 385	/* Write out the physical slot number = 0 */
 386	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 387
 388	/* Set the completion timer timeout to the maximum 50ms. */
 389	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 390
 391	/* Terminate list of capabilities (Next Capability Offset=0) */
 392	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 393
 394	/* Enable MSI */
 395	if (IS_ENABLED(CONFIG_PCI_MSI))
 396		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 397
 398	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 399
 400	/* Finish initialization - establish a PCI Express link */
 401	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 402
 403	/* This will timeout if we don't have a link. */
 404	err = rcar_pcie_wait_for_dl(pcie);
 405	if (err)
 406		return err;
 407
 408	/* Enable INTx interrupts */
 409	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 410
 411	wmb();
 412
 413	return 0;
 414}
 415
 416static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 417{
 418	struct rcar_pcie *pcie = &host->pcie;
 419
 420	/* Initialize the phy */
 421	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 422	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 423	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 424	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 425	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 426	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 427	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 428	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 429	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 430	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 431	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 432	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 433
 434	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 435	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 436	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 437
 438	return 0;
 439}
 440
 441static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 442{
 443	struct rcar_pcie *pcie = &host->pcie;
 444
 445	/*
 446	 * These settings come from the R-Car Series, 2nd Generation User's
 447	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 448	 */
 449	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 450	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 451	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 452	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 453
 454	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 455	/* The following value is for DC connection, no termination resistor */
 456	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 457	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 458	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 459
 460	return 0;
 461}
 462
 463static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 464{
 465	int err;
 466
 467	err = phy_init(host->phy);
 468	if (err)
 469		return err;
 470
 471	err = phy_power_on(host->phy);
 472	if (err)
 473		phy_exit(host->phy);
 474
 475	return err;
 476}
 477
 478static int rcar_msi_alloc(struct rcar_msi *chip)
 479{
 480	int msi;
 481
 482	mutex_lock(&chip->lock);
 483
 484	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
 485	if (msi < INT_PCI_MSI_NR)
 486		set_bit(msi, chip->used);
 487	else
 488		msi = -ENOSPC;
 489
 490	mutex_unlock(&chip->lock);
 491
 492	return msi;
 493}
 494
 495static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
 496{
 497	int msi;
 498
 499	mutex_lock(&chip->lock);
 500	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
 501				      order_base_2(no_irqs));
 502	mutex_unlock(&chip->lock);
 503
 504	return msi;
 505}
 506
 507static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
 508{
 509	mutex_lock(&chip->lock);
 510	clear_bit(irq, chip->used);
 511	mutex_unlock(&chip->lock);
 512}
 513
 514static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 515{
 516	struct rcar_pcie_host *host = data;
 517	struct rcar_pcie *pcie = &host->pcie;
 518	struct rcar_msi *msi = &host->msi;
 519	struct device *dev = pcie->dev;
 520	unsigned long reg;
 521
 522	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 523
 524	/* MSI & INTx share an interrupt - we only handle MSI here */
 525	if (!reg)
 526		return IRQ_NONE;
 527
 528	while (reg) {
 529		unsigned int index = find_first_bit(&reg, 32);
 530		unsigned int msi_irq;
 531
 532		/* clear the interrupt */
 533		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
 534
 535		msi_irq = irq_find_mapping(msi->domain, index);
 536		if (msi_irq) {
 537			if (test_bit(index, msi->used))
 538				generic_handle_irq(msi_irq);
 539			else
 540				dev_info(dev, "unhandled MSI\n");
 541		} else {
 542			/* Unknown MSI, just clear it */
 543			dev_dbg(dev, "unexpected MSI\n");
 544		}
 545
 546		/* see if there's any more pending in this vector */
 547		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 548	}
 549
 550	return IRQ_HANDLED;
 551}
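
rcar_pcie_msi_irq() drains PCIEMSIFR by servicing the lowest pending bit, acking it, dispatching the mapped Linux interrupt, and re-reading the register until no bits remain. A small standalone model of that drain loop (illustrative only; the 'pending' variable stands in for the hardware status register):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the PCIEMSIFR status register (illustrative only). */
static uint32_t pending = (1u << 3) | (1u << 17);

static void handle_vector(unsigned int index)
{
	printf("servicing MSI vector %u\n", index);
}

int main(void)
{
	uint32_t reg = pending;

	while (reg) {
		/* Lowest set bit first, as find_first_bit() does. */
		unsigned int index = (unsigned int)__builtin_ctz(reg);

		pending &= ~(1u << index);	/* model of the write-1-to-clear ack */
		handle_vector(index);

		reg = pending;			/* re-read: more may have arrived */
	}
	return 0;
}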
 552
 553static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
 554			      struct msi_desc *desc)
 555{
 556	struct rcar_msi *msi = to_rcar_msi(chip);
 557	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
 558						   msi.chip);
 559	struct rcar_pcie *pcie = &host->pcie;
 560	struct msi_msg msg;
 561	unsigned int irq;
 562	int hwirq;
 563
 564	hwirq = rcar_msi_alloc(msi);
 565	if (hwirq < 0)
 566		return hwirq;
 567
 568	irq = irq_find_mapping(msi->domain, hwirq);
 569	if (!irq) {
 570		rcar_msi_free(msi, hwirq);
 571		return -EINVAL;
 572	}
 573
 574	irq_set_msi_desc(irq, desc);
 575
 576	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 577	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 578	msg.data = hwirq;
 579
 580	pci_write_msi_msg(irq, &msg);
 581
 582	return 0;
 583}
 584
 585static int rcar_msi_setup_irqs(struct msi_controller *chip,
 586			       struct pci_dev *pdev, int nvec, int type)
 587{
 588	struct rcar_msi *msi = to_rcar_msi(chip);
 589	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
 590						   msi.chip);
 591	struct rcar_pcie *pcie = &host->pcie;
 592	struct msi_desc *desc;
 593	struct msi_msg msg;
 594	unsigned int irq;
 595	int hwirq;
 596	int i;
 597
 598	/* MSI-X interrupts are not supported */
 599	if (type == PCI_CAP_ID_MSIX)
 600		return -EINVAL;
 601
 602	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
 603	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
 604
 605	hwirq = rcar_msi_alloc_region(msi, nvec);
 606	if (hwirq < 0)
 607		return -ENOSPC;
 608
 609	irq = irq_find_mapping(msi->domain, hwirq);
 610	if (!irq)
 611		return -ENOSPC;
 612
 613	for (i = 0; i < nvec; i++) {
 614		/*
 615		 * irq_create_mapping() called from rcar_pcie_probe() pre-
  616		 * allocates descs, so there is no need to allocate descs here.
 617		 * We can therefore assume that if irq_find_mapping() above
 618		 * returns non-zero, then the descs are also successfully
 619		 * allocated.
 620		 */
 621		if (irq_set_msi_desc_off(irq, i, desc)) {
 622			/* TODO: clear */
 623			return -EINVAL;
 624		}
 625	}
 626
 627	desc->nvec_used = nvec;
 628	desc->msi_attrib.multiple = order_base_2(nvec);
 629
 630	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 631	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 632	msg.data = hwirq;
 633
 634	pci_write_msi_msg(irq, &msg);
 635
 636	return 0;
 637}
 638
 639static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
 640{
 641	struct rcar_msi *msi = to_rcar_msi(chip);
 642	struct irq_data *d = irq_get_irq_data(irq);
 643
 644	rcar_msi_free(msi, d->hwirq);
 645}
 646
 647static struct irq_chip rcar_msi_irq_chip = {
 648	.name = "R-Car PCIe MSI",
 649	.irq_enable = pci_msi_unmask_irq,
 650	.irq_disable = pci_msi_mask_irq,
 651	.irq_mask = pci_msi_mask_irq,
 652	.irq_unmask = pci_msi_unmask_irq,
 653};
 654
 655static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
 656			irq_hw_number_t hwirq)
 657{
 658	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
 659	irq_set_chip_data(irq, domain->host_data);
 660
 661	return 0;
 662}
 663
 664static const struct irq_domain_ops msi_domain_ops = {
 665	.map = rcar_msi_map,
 666};
 667
 668static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
 669{
 670	struct rcar_msi *msi = &host->msi;
 671	int i, irq;
 672
 673	for (i = 0; i < INT_PCI_MSI_NR; i++) {
 674		irq = irq_find_mapping(msi->domain, i);
 675		if (irq > 0)
 676			irq_dispose_mapping(irq);
 677	}
 678
 679	irq_domain_remove(msi->domain);
 680}
 681
 682static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
 683{
 684	struct rcar_pcie *pcie = &host->pcie;
 685	struct rcar_msi *msi = &host->msi;
 686	unsigned long base;
 687
 688	/* setup MSI data target */
 689	base = virt_to_phys((void *)msi->pages);
 690
 691	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
 692	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
 693
 694	/* enable all MSI interrupts */
 695	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
 696}
 697
 698static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 699{
 700	struct rcar_pcie *pcie = &host->pcie;
 701	struct device *dev = pcie->dev;
 702	struct rcar_msi *msi = &host->msi;
 703	int err, i;
 704
 705	mutex_init(&msi->lock);
 706
 707	msi->chip.dev = dev;
 708	msi->chip.setup_irq = rcar_msi_setup_irq;
 709	msi->chip.setup_irqs = rcar_msi_setup_irqs;
 710	msi->chip.teardown_irq = rcar_msi_teardown_irq;
 711
 712	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 713					    &msi_domain_ops, &msi->chip);
 714	if (!msi->domain) {
 715		dev_err(dev, "failed to create IRQ domain\n");
 716		return -ENOMEM;
 717	}
 718
 719	for (i = 0; i < INT_PCI_MSI_NR; i++)
 720		irq_create_mapping(msi->domain, i);
 721
 722	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 723	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 724			       IRQF_SHARED | IRQF_NO_THREAD,
 725			       rcar_msi_irq_chip.name, host);
 726	if (err < 0) {
 727		dev_err(dev, "failed to request IRQ: %d\n", err);
 728		goto err;
 729	}
 730
 731	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 732			       IRQF_SHARED | IRQF_NO_THREAD,
 733			       rcar_msi_irq_chip.name, host);
 734	if (err < 0) {
 735		dev_err(dev, "failed to request IRQ: %d\n", err);
 736		goto err;
 737	}
 738
 739	/* setup MSI data target */
 740	msi->pages = __get_free_pages(GFP_KERNEL, 0);
 741	rcar_pcie_hw_enable_msi(host);
 742
 743	return 0;
 744
 745err:
 746	rcar_pcie_unmap_msi(host);
 747	return err;
 748}
 749
 750static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 751{
 752	struct rcar_pcie *pcie = &host->pcie;
 753	struct rcar_msi *msi = &host->msi;
 754
 755	/* Disable all MSI interrupts */
 756	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 757
 758	/* Disable address decoding of the MSI interrupt, MSIFE */
 759	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 760
 761	free_pages(msi->pages, 0);
 762
 763	rcar_pcie_unmap_msi(host);
 764}
 765
 766static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 767{
 768	struct rcar_pcie *pcie = &host->pcie;
 769	struct device *dev = pcie->dev;
 770	struct resource res;
 771	int err, i;
 772
 773	host->phy = devm_phy_optional_get(dev, "pcie");
 774	if (IS_ERR(host->phy))
 775		return PTR_ERR(host->phy);
 776
 777	err = of_address_to_resource(dev->of_node, 0, &res);
 778	if (err)
 779		return err;
 780
 781	pcie->base = devm_ioremap_resource(dev, &res);
 782	if (IS_ERR(pcie->base))
 783		return PTR_ERR(pcie->base);
 784
 785	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 786	if (IS_ERR(host->bus_clk)) {
 787		dev_err(dev, "cannot get pcie bus clock\n");
 788		return PTR_ERR(host->bus_clk);
 789	}
 790
 791	i = irq_of_parse_and_map(dev->of_node, 0);
 792	if (!i) {
 793		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 794		err = -ENOENT;
 795		goto err_irq1;
 796	}
 797	host->msi.irq1 = i;
 798
 799	i = irq_of_parse_and_map(dev->of_node, 1);
 800	if (!i) {
 801		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 802		err = -ENOENT;
 803		goto err_irq2;
 804	}
 805	host->msi.irq2 = i;
 806
 807	return 0;
 808
 809err_irq2:
 810	irq_dispose_mapping(host->msi.irq1);
 811err_irq1:
 812	return err;
 813}
 814
 815static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 816				    struct resource_entry *entry,
 817				    int *index)
 818{
 819	u64 restype = entry->res->flags;
 820	u64 cpu_addr = entry->res->start;
 821	u64 cpu_end = entry->res->end;
 822	u64 pci_addr = entry->res->start - entry->offset;
 823	u32 flags = LAM_64BIT | LAR_ENABLE;
 824	u64 mask;
 825	u64 size = resource_size(entry->res);
 826	int idx = *index;
 827
 828	if (restype & IORESOURCE_PREFETCH)
 829		flags |= LAM_PREFETCH;
 830
 831	while (cpu_addr < cpu_end) {
 832		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 833			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 834			return -EINVAL;
 835		}
 836		/*
 837		 * If the size of the range is larger than the alignment of
 838		 * the start address, we have to use multiple entries to
 839		 * perform the mapping.
 840		 */
 841		if (cpu_addr > 0) {
 842			unsigned long nr_zeros = __ffs64(cpu_addr);
 843			u64 alignment = 1ULL << nr_zeros;
 844
 845			size = min(size, alignment);
 846		}
 847		/* Hardware supports max 4GiB inbound region */
 848		size = min(size, 1ULL << 32);
 849
 850		mask = roundup_pow_of_two(size) - 1;
 851		mask &= ~0xf;
 852
 853		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 854				      lower_32_bits(mask) | flags, idx, true);
 855
 856		pci_addr += size;
 857		cpu_addr += size;
 858		idx += 2;
 859	}
 860	*index = idx;
 861
 862	return 0;
 863}
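
rcar_pcie_inbound_ranges() splits each dma-range into windows whose size is limited both by the alignment of the current CPU address and by the 4 GiB hardware maximum, then derives a power-of-two mask for each window. A standalone sketch of that size/mask computation (the example range is made up; the helper names are not kernel APIs):

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* Round a nonzero value up to the next power of two. */
static uint64_t roundup_pow2(uint64_t v)
{
	v--;
	v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
	v |= v >> 8;  v |= v >> 16; v |= v >> 32;
	return v + 1;
}

int main(void)
{
	/* Made-up example: CPU range 0x48000000..0x7fffffff mapped 1:1. */
	uint64_t cpu_addr = 0x48000000, cpu_end = 0x7fffffff;
	uint64_t size = cpu_end - cpu_addr + 1;

	while (cpu_addr < cpu_end) {
		if (cpu_addr)		/* window limited by address alignment */
			size = min_u64(size, 1ULL << __builtin_ctzll(cpu_addr));
		size = min_u64(size, 1ULL << 32);	/* 4 GiB hardware limit */

		uint64_t mask = (roundup_pow2(size) - 1) & ~0xfULL;

		printf("window 0x%llx..0x%llx mask 0x%llx\n",
		       (unsigned long long)cpu_addr,
		       (unsigned long long)(cpu_addr + size - 1),
		       (unsigned long long)mask);
		cpu_addr += size;
	}
	return 0;
}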
 864
 865static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 866{
 867	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 868	struct resource_entry *entry;
 869	int index = 0, err = 0;
 870
 871	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 872		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 873		if (err)
 874			break;
 875	}
 876
 877	return err;
 878}
 879
 880static const struct of_device_id rcar_pcie_of_match[] = {
 881	{ .compatible = "renesas,pcie-r8a7779",
 882	  .data = rcar_pcie_phy_init_h1 },
 883	{ .compatible = "renesas,pcie-r8a7790",
 884	  .data = rcar_pcie_phy_init_gen2 },
 885	{ .compatible = "renesas,pcie-r8a7791",
 886	  .data = rcar_pcie_phy_init_gen2 },
 887	{ .compatible = "renesas,pcie-rcar-gen2",
 888	  .data = rcar_pcie_phy_init_gen2 },
 889	{ .compatible = "renesas,pcie-r8a7795",
 890	  .data = rcar_pcie_phy_init_gen3 },
 891	{ .compatible = "renesas,pcie-rcar-gen3",
 892	  .data = rcar_pcie_phy_init_gen3 },
 893	{},
 894};
 895
 896static int rcar_pcie_probe(struct platform_device *pdev)
 897{
 898	struct device *dev = &pdev->dev;
 899	struct rcar_pcie_host *host;
 900	struct rcar_pcie *pcie;
 901	u32 data;
 902	int err;
 903	struct pci_host_bridge *bridge;
 904
 905	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 906	if (!bridge)
 907		return -ENOMEM;
 908
 909	host = pci_host_bridge_priv(bridge);
 910	pcie = &host->pcie;
 911	pcie->dev = dev;
 912	platform_set_drvdata(pdev, host);
 913
 914	pm_runtime_enable(pcie->dev);
 915	err = pm_runtime_get_sync(pcie->dev);
 916	if (err < 0) {
 917		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 918		goto err_pm_put;
 919	}
 920
 921	err = rcar_pcie_get_resources(host);
 922	if (err < 0) {
 923		dev_err(dev, "failed to request resources: %d\n", err);
 924		goto err_pm_put;
 925	}
 926
 927	err = clk_prepare_enable(host->bus_clk);
 928	if (err) {
 929		dev_err(dev, "failed to enable bus clock: %d\n", err);
 930		goto err_unmap_msi_irqs;
 931	}
 932
 933	err = rcar_pcie_parse_map_dma_ranges(host);
 934	if (err)
 935		goto err_clk_disable;
 936
 937	host->phy_init_fn = of_device_get_match_data(dev);
 938	err = host->phy_init_fn(host);
 939	if (err) {
 940		dev_err(dev, "failed to init PCIe PHY\n");
 941		goto err_clk_disable;
 942	}
 943
 944	/* Failure to get a link might just be that no cards are inserted */
 945	if (rcar_pcie_hw_init(pcie)) {
 946		dev_info(dev, "PCIe link down\n");
 947		err = -ENODEV;
 948		goto err_phy_shutdown;
 949	}
 950
 951	data = rcar_pci_read_reg(pcie, MACSR);
 952	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 953
 954	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 955		err = rcar_pcie_enable_msi(host);
 956		if (err < 0) {
 957			dev_err(dev,
 958				"failed to enable MSI support: %d\n",
 959				err);
 960			goto err_phy_shutdown;
 961		}
 962	}
 963
 964	err = rcar_pcie_enable(host);
 965	if (err)
 966		goto err_msi_teardown;
 967
 968	return 0;
 969
 970err_msi_teardown:
 971	if (IS_ENABLED(CONFIG_PCI_MSI))
 972		rcar_pcie_teardown_msi(host);
 973
 974err_phy_shutdown:
 975	if (host->phy) {
 976		phy_power_off(host->phy);
 977		phy_exit(host->phy);
 978	}
 979
 980err_clk_disable:
 981	clk_disable_unprepare(host->bus_clk);
 982
 983err_unmap_msi_irqs:
 984	irq_dispose_mapping(host->msi.irq2);
 985	irq_dispose_mapping(host->msi.irq1);
 986
 987err_pm_put:
 988	pm_runtime_put(dev);
 989	pm_runtime_disable(dev);
 990
 991	return err;
 992}
 993
 994static int __maybe_unused rcar_pcie_resume(struct device *dev)
 995{
 996	struct rcar_pcie_host *host = dev_get_drvdata(dev);
 997	struct rcar_pcie *pcie = &host->pcie;
 998	unsigned int data;
 999	int err;
1000
1001	err = rcar_pcie_parse_map_dma_ranges(host);
1002	if (err)
1003		return 0;
1004
1005	/* Failure to get a link might just be that no cards are inserted */
1006	err = host->phy_init_fn(host);
1007	if (err) {
1008		dev_info(dev, "PCIe link down\n");
1009		return 0;
1010	}
1011
1012	data = rcar_pci_read_reg(pcie, MACSR);
1013	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1014
1015	/* Enable MSI */
1016	if (IS_ENABLED(CONFIG_PCI_MSI))
1017		rcar_pcie_hw_enable_msi(host);
1018
1019	rcar_pcie_hw_enable(host);
1020
1021	return 0;
1022}
1023
1024static int rcar_pcie_resume_noirq(struct device *dev)
1025{
1026	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1027	struct rcar_pcie *pcie = &host->pcie;
1028
1029	if (rcar_pci_read_reg(pcie, PMSR) &&
1030	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1031		return 0;
1032
1033	/* Re-establish the PCIe link */
1034	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1035	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1036	return rcar_pcie_wait_for_dl(pcie);
1037}
1038
1039static const struct dev_pm_ops rcar_pcie_pm_ops = {
1040	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1041	.resume_noirq = rcar_pcie_resume_noirq,
1042};
1043
1044static struct platform_driver rcar_pcie_driver = {
1045	.driver = {
1046		.name = "rcar-pcie",
1047		.of_match_table = rcar_pcie_of_match,
1048		.pm = &rcar_pcie_pm_ops,
1049		.suppress_bind_attrs = true,
1050	},
1051	.probe = rcar_pcie_probe,
1052};
1053builtin_platform_driver(rcar_pcie_driver);
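
The v5.14.15 listing below is the same driver after its MSI support was reworked: the legacy struct msi_controller callbacks (rcar_msi_setup_irq(), rcar_msi_setup_irqs(), rcar_msi_teardown_irq()) and the spare page allocated as an MSI target are gone. Instead, the driver builds a hierarchical MSI IRQ domain with pci_msi_create_irq_domain() on top of a linear parent domain, masks and unmasks individual vectors through PCIEMSIIER under a spinlock, acks them by writing PCIEMSIFR, and points the MSI doorbell address at the controller's own register window. The config-access, link-training, and PHY-init paths are essentially unchanged.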
v5.14.15 (drivers/pci/controller/pcie-rcar-host.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/interrupt.h>
  18#include <linux/irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/kernel.h>
  21#include <linux/init.h>
  22#include <linux/msi.h>
  23#include <linux/of_address.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_pci.h>
  26#include <linux/of_platform.h>
  27#include <linux/pci.h>
  28#include <linux/phy/phy.h>
  29#include <linux/platform_device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/slab.h>
  32
  33#include "pcie-rcar.h"
  34
  35struct rcar_msi {
  36	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  37	struct irq_domain *domain;
  38	struct mutex map_lock;
  39	spinlock_t mask_lock;
  40	int irq1;
  41	int irq2;
  42};
  43
  44/* Structure representing the PCIe interface */
  45struct rcar_pcie_host {
  46	struct rcar_pcie	pcie;
  47	struct phy		*phy;
  48	struct clk		*bus_clk;
  49	struct			rcar_msi msi;
  50	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  51};
  52
  53static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
  54{
  55	return container_of(msi, struct rcar_pcie_host, msi);
  56}
  57
  58static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
  59{
  60	unsigned int shift = BITS_PER_BYTE * (where & 3);
  61	u32 val = rcar_pci_read_reg(pcie, where & ~3);
  62
  63	return val >> shift;
  64}
  65
  66/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
  67static int rcar_pcie_config_access(struct rcar_pcie_host *host,
  68		unsigned char access_type, struct pci_bus *bus,
  69		unsigned int devfn, int where, u32 *data)
  70{
  71	struct rcar_pcie *pcie = &host->pcie;
  72	unsigned int dev, func, reg, index;
  73
  74	dev = PCI_SLOT(devfn);
  75	func = PCI_FUNC(devfn);
  76	reg = where & ~3;
  77	index = reg / 4;
  78
  79	/*
  80	 * While each channel has its own memory-mapped extended config
  81	 * space, it's generally only accessible when in endpoint mode.
  82	 * When in root complex mode, the controller is unable to target
  83	 * itself with either type 0 or type 1 accesses, and indeed, any
   84	 * controller-initiated target transfer to its own config space
   85	 * results in a completer abort.
  86	 *
  87	 * Each channel effectively only supports a single device, but as
  88	 * the same channel <-> device access works for any PCI_SLOT()
  89	 * value, we cheat a bit here and bind the controller's config
  90	 * space to devfn 0 in order to enable self-enumeration. In this
  91	 * case the regular ECAR/ECDR path is sidelined and the mangled
  92	 * config access itself is initiated as an internal bus transaction.
  93	 */
  94	if (pci_is_root_bus(bus)) {
  95		if (dev != 0)
  96			return PCIBIOS_DEVICE_NOT_FOUND;
  97
  98		if (access_type == RCAR_PCI_ACCESS_READ)
  99			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 100		else
 101			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 102
 103		return PCIBIOS_SUCCESSFUL;
 104	}
 105
 106	/* Clear errors */
 107	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 108
 109	/* Set the PIO address */
 110	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 111		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 112
 113	/* Enable the configuration access */
 114	if (pci_is_root_bus(bus->parent))
 115		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 116	else
 117		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 118
 119	/* Check for errors */
 120	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 121		return PCIBIOS_DEVICE_NOT_FOUND;
 122
 123	/* Check for master and target aborts */
 124	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 125		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 126		return PCIBIOS_DEVICE_NOT_FOUND;
 127
 128	if (access_type == RCAR_PCI_ACCESS_READ)
 129		*data = rcar_pci_read_reg(pcie, PCIECDR);
 130	else
 131		rcar_pci_write_reg(pcie, *data, PCIECDR);
 132
 133	/* Disable the configuration access */
 134	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 135
 136	return PCIBIOS_SUCCESSFUL;
 137}
 138
 139static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 140			       int where, int size, u32 *val)
 141{
 142	struct rcar_pcie_host *host = bus->sysdata;
 143	int ret;
 144
 145	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 146				      bus, devfn, where, val);
 147	if (ret != PCIBIOS_SUCCESSFUL) {
 148		*val = 0xffffffff;
 149		return ret;
 150	}
 151
 152	if (size == 1)
 153		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 154	else if (size == 2)
 155		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 156
 157	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 158		bus->number, devfn, where, size, *val);
 159
 160	return ret;
 161}
 162
 163/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 164static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 165				int where, int size, u32 val)
 166{
 167	struct rcar_pcie_host *host = bus->sysdata;
 168	unsigned int shift;
 169	u32 data;
 170	int ret;
 171
 172	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 173				      bus, devfn, where, &data);
 174	if (ret != PCIBIOS_SUCCESSFUL)
 175		return ret;
 176
 177	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 178		bus->number, devfn, where, size, val);
 179
 180	if (size == 1) {
 181		shift = BITS_PER_BYTE * (where & 3);
 182		data &= ~(0xff << shift);
 183		data |= ((val & 0xff) << shift);
 184	} else if (size == 2) {
 185		shift = BITS_PER_BYTE * (where & 2);
 186		data &= ~(0xffff << shift);
 187		data |= ((val & 0xffff) << shift);
 188	} else
 189		data = val;
 190
 191	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 192				      bus, devfn, where, &data);
 193
 194	return ret;
 195}
 196
 197static struct pci_ops rcar_pcie_ops = {
 198	.read	= rcar_pcie_read_conf,
 199	.write	= rcar_pcie_write_conf,
 200};
 201
 202static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 203{
 204	struct device *dev = pcie->dev;
 205	unsigned int timeout = 1000;
 206	u32 macsr;
 207
 208	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 209		return;
 210
 211	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 212		dev_err(dev, "Speed change already in progress\n");
 213		return;
 214	}
 215
 216	macsr = rcar_pci_read_reg(pcie, MACSR);
 217	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 218		goto done;
 219
 220	/* Set target link speed to 5.0 GT/s */
 221	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 222		   PCI_EXP_LNKSTA_CLS_5_0GB);
 223
 224	/* Set speed change reason as intentional factor */
 225	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 226
 227	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 228	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 229		rcar_pci_write_reg(pcie, macsr, MACSR);
 230
 231	/* Start link speed change */
 232	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 233
 234	while (timeout--) {
 235		macsr = rcar_pci_read_reg(pcie, MACSR);
 236		if (macsr & SPCHGFIN) {
 237			/* Clear the interrupt bits */
 238			rcar_pci_write_reg(pcie, macsr, MACSR);
 239
 240			if (macsr & SPCHGFAIL)
 241				dev_err(dev, "Speed change failed\n");
 242
 243			goto done;
 244		}
 245
 246		msleep(1);
 247	}
 248
 249	dev_err(dev, "Speed change timed out\n");
 250
 251done:
 252	dev_info(dev, "Current link speed is %s GT/s\n",
 253		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 254}
 255
 256static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 257{
 258	struct rcar_pcie *pcie = &host->pcie;
 259	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 260	struct resource_entry *win;
 261	LIST_HEAD(res);
 262	int i = 0;
 263
 264	/* Try setting 5 GT/s link speed */
 265	rcar_pcie_force_speedup(pcie);
 266
 267	/* Setup PCI resources */
 268	resource_list_for_each_entry(win, &bridge->windows) {
 269		struct resource *res = win->res;
 270
 271		if (!res->flags)
 272			continue;
 273
 274		switch (resource_type(res)) {
 275		case IORESOURCE_IO:
 276		case IORESOURCE_MEM:
 277			rcar_pcie_set_outbound(pcie, i, win);
 278			i++;
 279			break;
 280		}
 281	}
 282}
 283
 284static int rcar_pcie_enable(struct rcar_pcie_host *host)
 285{
 286	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 287
 288	rcar_pcie_hw_enable(host);
 289
 290	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 291
 292	bridge->sysdata = host;
 293	bridge->ops = &rcar_pcie_ops;
 294
 295	return pci_host_probe(bridge);
 296}
 297
 298static int phy_wait_for_ack(struct rcar_pcie *pcie)
 299{
 300	struct device *dev = pcie->dev;
 301	unsigned int timeout = 100;
 302
 303	while (timeout--) {
 304		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 305			return 0;
 306
 307		udelay(100);
 308	}
 309
 310	dev_err(dev, "Access to PCIe phy timed out\n");
 311
 312	return -ETIMEDOUT;
 313}
 314
 315static void phy_write_reg(struct rcar_pcie *pcie,
 316			  unsigned int rate, u32 addr,
 317			  unsigned int lane, u32 data)
 318{
 319	u32 phyaddr;
 320
 321	phyaddr = WRITE_CMD |
 322		((rate & 1) << RATE_POS) |
 323		((lane & 0xf) << LANE_POS) |
 324		((addr & 0xff) << ADR_POS);
 325
 326	/* Set write data */
 327	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 328	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 329
 330	/* Ignore errors as they will be dealt with if the data link is down */
 331	phy_wait_for_ack(pcie);
 332
 333	/* Clear command */
 334	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 335	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 336
 337	/* Ignore errors as they will be dealt with if the data link is down */
 338	phy_wait_for_ack(pcie);
 339}
 340
 341static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 342{
 343	int err;
 344
 345	/* Begin initialization */
 346	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 347
 348	/* Set mode */
 349	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 350
 351	err = rcar_pcie_wait_for_phyrdy(pcie);
 352	if (err)
 353		return err;
 354
 355	/*
 356	 * Initial header for port config space is type 1, set the device
 357	 * class to match. Hardware takes care of propagating the IDSETR
 358	 * settings, so there is no need to bother with a quirk.
 359	 */
 360	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 361
 362	/*
 363	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
  364	 * they aren't used, to avoid the bridge being detected as broken.
 365	 */
 366	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 367	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 368
 369	/* Initialize default capabilities. */
 370	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 371	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 372		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 373	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 374		PCI_HEADER_TYPE_BRIDGE);
 375
 376	/* Enable data link layer active state reporting */
 377	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 378		PCI_EXP_LNKCAP_DLLLARC);
 379
 380	/* Write out the physical slot number = 0 */
 381	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 382
 383	/* Set the completion timer timeout to the maximum 50ms. */
 384	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 385
 386	/* Terminate list of capabilities (Next Capability Offset=0) */
 387	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 388
 389	/* Enable MSI */
 390	if (IS_ENABLED(CONFIG_PCI_MSI))
 391		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 392
 393	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 394
 395	/* Finish initialization - establish a PCI Express link */
 396	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 397
 398	/* This will timeout if we don't have a link. */
 399	err = rcar_pcie_wait_for_dl(pcie);
 400	if (err)
 401		return err;
 402
 403	/* Enable INTx interrupts */
 404	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 405
 406	wmb();
 407
 408	return 0;
 409}
 410
 411static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 412{
 413	struct rcar_pcie *pcie = &host->pcie;
 414
 415	/* Initialize the phy */
 416	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 417	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 418	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 419	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 420	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 421	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 422	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 423	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 424	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 425	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 426	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 427	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 428
 429	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 430	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 431	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 432
 433	return 0;
 434}
 435
 436static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 437{
 438	struct rcar_pcie *pcie = &host->pcie;
 439
 440	/*
 441	 * These settings come from the R-Car Series, 2nd Generation User's
 442	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 443	 */
 444	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 445	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 446	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 447	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 448
 449	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 450	/* The following value is for DC connection, no termination resistor */
 451	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 452	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 453	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 454
 455	return 0;
 456}
 457
 458static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 459{
 460	int err;
 461
 462	err = phy_init(host->phy);
 463	if (err)
 464		return err;
 465
 466	err = phy_power_on(host->phy);
 467	if (err)
 468		phy_exit(host->phy);
 469
 470	return err;
 471}
 472
 473static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 474{
 475	struct rcar_pcie_host *host = data;
 476	struct rcar_pcie *pcie = &host->pcie;
 477	struct rcar_msi *msi = &host->msi;
 478	struct device *dev = pcie->dev;
 479	unsigned long reg;
 480
 481	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 482
 483	/* MSI & INTx share an interrupt - we only handle MSI here */
 484	if (!reg)
 485		return IRQ_NONE;
 486
 487	while (reg) {
 488		unsigned int index = find_first_bit(&reg, 32);
 489		unsigned int msi_irq;
 490
 491		msi_irq = irq_find_mapping(msi->domain->parent, index);
 492		if (msi_irq) {
 493			generic_handle_irq(msi_irq);
 494		} else {
 495			/* Unknown MSI, just clear it */
 496			dev_dbg(dev, "unexpected MSI\n");
 497			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 498		}
 499
 500		/* see if there's any more pending in this vector */
 501		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 502	}
 503
 504	return IRQ_HANDLED;
 505}
 506
 507static void rcar_msi_top_irq_ack(struct irq_data *d)
 508{
 509	irq_chip_ack_parent(d);
 510}
 511
 512static void rcar_msi_top_irq_mask(struct irq_data *d)
 513{
 514	pci_msi_mask_irq(d);
 515	irq_chip_mask_parent(d);
 516}
 517
 518static void rcar_msi_top_irq_unmask(struct irq_data *d)
 519{
 520	pci_msi_unmask_irq(d);
 521	irq_chip_unmask_parent(d);
 522}
 523
 524static struct irq_chip rcar_msi_top_chip = {
 525	.name		= "PCIe MSI",
 526	.irq_ack	= rcar_msi_top_irq_ack,
 527	.irq_mask	= rcar_msi_top_irq_mask,
 528	.irq_unmask	= rcar_msi_top_irq_unmask,
 529};
 530
 531static void rcar_msi_irq_ack(struct irq_data *d)
 532{
 533	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 534	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 535
 536	/* clear the interrupt */
 537	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 538}
 539
 540static void rcar_msi_irq_mask(struct irq_data *d)
 541{
 542	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 543	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 544	unsigned long flags;
 545	u32 value;
 546
 547	spin_lock_irqsave(&msi->mask_lock, flags);
 548	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 549	value &= ~BIT(d->hwirq);
 550	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 551	spin_unlock_irqrestore(&msi->mask_lock, flags);
 552}
 553
 554static void rcar_msi_irq_unmask(struct irq_data *d)
 555{
 556	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 557	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 558	unsigned long flags;
 559	u32 value;
 560
 561	spin_lock_irqsave(&msi->mask_lock, flags);
 562	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 563	value |= BIT(d->hwirq);
 564	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 565	spin_unlock_irqrestore(&msi->mask_lock, flags);
 566}
 567
 568static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
 569{
 570	return -EINVAL;
 571}
 572
 573static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 574{
 575	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 576	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 577
 578	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 579	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 580	msg->data = data->hwirq;
 581}
 582
 583static struct irq_chip rcar_msi_bottom_chip = {
 584	.name			= "Rcar MSI",
 585	.irq_ack		= rcar_msi_irq_ack,
 586	.irq_mask		= rcar_msi_irq_mask,
 587	.irq_unmask		= rcar_msi_irq_unmask,
 588	.irq_set_affinity 	= rcar_msi_set_affinity,
 589	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 590};
 591
 592static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 593				  unsigned int nr_irqs, void *args)
 594{
 595	struct rcar_msi *msi = domain->host_data;
 596	unsigned int i;
 597	int hwirq;
 598
 599	mutex_lock(&msi->map_lock);
 600
 601	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 602
 603	mutex_unlock(&msi->map_lock);
 604
 605	if (hwirq < 0)
 606		return -ENOSPC;
 607
 608	for (i = 0; i < nr_irqs; i++)
 609		irq_domain_set_info(domain, virq + i, hwirq + i,
 610				    &rcar_msi_bottom_chip, domain->host_data,
 611				    handle_edge_irq, NULL, NULL);
 612
 613	return 0;
 614}
 615
 616static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 617				  unsigned int nr_irqs)
 618{
 619	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 620	struct rcar_msi *msi = domain->host_data;
 621
 622	mutex_lock(&msi->map_lock);
 623
 624	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 625
 626	mutex_unlock(&msi->map_lock);
 627}
 628
 629static const struct irq_domain_ops rcar_msi_domain_ops = {
 630	.alloc	= rcar_msi_domain_alloc,
 631	.free	= rcar_msi_domain_free,
 632};
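
rcar_msi_domain_alloc() reserves a naturally aligned, power-of-two block of hardware vectors via bitmap_find_free_region(..., order_base_2(nr_irqs)); multi-MSI needs such a block because the endpoint selects a vector by varying only the low-order bits of the MSI data word. A simplified userspace model of that allocation policy (alloc_region() is an illustrative stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the 32-entry 'used' vector bitmap. */
static uint32_t used;

/* Grab a free, naturally aligned block of (1 << order) vectors --
 * a simplified model of what bitmap_find_free_region() provides. */
static int alloc_region(unsigned int order)
{
	unsigned int step = 1u << order;
	uint32_t mask = (step >= 32) ? 0xffffffffu : ((1u << step) - 1);

	for (unsigned int pos = 0; pos < 32; pos += step) {
		if (!(used & (mask << pos))) {
			used |= mask << pos;
			return (int)pos;
		}
	}
	return -1;	/* maps to -ENOSPC in the driver */
}

int main(void)
{
	printf("order 2 -> hwirq %d\n", alloc_region(2));	/* 0: vectors 0-3   */
	printf("order 0 -> hwirq %d\n", alloc_region(0));	/* 4: single vector */
	printf("order 2 -> hwirq %d\n", alloc_region(2));	/* 8: vectors 8-11  */
	return 0;
}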
 633
 634static struct msi_domain_info rcar_msi_info = {
 635	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 636		   MSI_FLAG_MULTI_PCI_MSI),
 637	.chip	= &rcar_msi_top_chip,
 638};
 639
 640static int rcar_allocate_domains(struct rcar_msi *msi)
 641{
 642	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 643	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 644	struct irq_domain *parent;
 645
 646	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 647					  &rcar_msi_domain_ops, msi);
 648	if (!parent) {
 649		dev_err(pcie->dev, "failed to create IRQ domain\n");
 650		return -ENOMEM;
 651	}
 652	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 653
 654	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 655	if (!msi->domain) {
 656		dev_err(pcie->dev, "failed to create MSI domain\n");
 657		irq_domain_remove(parent);
 658		return -ENOMEM;
 659	}
 660
 661	return 0;
 662}
 663
 664static void rcar_free_domains(struct rcar_msi *msi)
 665{
 666	struct irq_domain *parent = msi->domain->parent;
 667
 668	irq_domain_remove(msi->domain);
 669	irq_domain_remove(parent);
 670}
 671
 672static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 673{
 674	struct rcar_pcie *pcie = &host->pcie;
 675	struct device *dev = pcie->dev;
 676	struct rcar_msi *msi = &host->msi;
 677	struct resource res;
 678	int err;
 679
 680	mutex_init(&msi->map_lock);
 681	spin_lock_init(&msi->mask_lock);
 682
 683	err = of_address_to_resource(dev->of_node, 0, &res);
 684	if (err)
 685		return err;
 686
 687	err = rcar_allocate_domains(msi);
 688	if (err)
 689		return err;
 690
 691	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 692	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 693			       IRQF_SHARED | IRQF_NO_THREAD,
 694			       rcar_msi_bottom_chip.name, host);
 695	if (err < 0) {
 696		dev_err(dev, "failed to request IRQ: %d\n", err);
 697		goto err;
 698	}
 699
 700	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 701			       IRQF_SHARED | IRQF_NO_THREAD,
 702			       rcar_msi_bottom_chip.name, host);
 703	if (err < 0) {
 704		dev_err(dev, "failed to request IRQ: %d\n", err);
 705		goto err;
 706	}
 707
 708	/* disable all MSIs */
 709	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 710
 711	/*
  712	 * Setup MSI data target using the RC base address, which
  713	 * is guaranteed to be in the low 32-bit range on any R-Car HW.
 714	 */
 715	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 716	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 717
 718	return 0;
 719
 720err:
 721	rcar_free_domains(msi);
 722	return err;
 723}
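
Note the design change relative to the v5.9 rcar_pcie_enable_msi() above: no memory page is allocated as the MSI target any more. The doorbell address is taken from the controller's own register resource (res.start), which the comment notes is always a 32-bit address on R-Car hardware, so the doorbell writes target the controller rather than allocated RAM and there is nothing to free on teardown.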
 724
 725static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 726{
 727	struct rcar_pcie *pcie = &host->pcie;
 728
 729	/* Disable all MSI interrupts */
 730	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 731
 732	/* Disable address decoding of the MSI interrupt, MSIFE */
 733	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 734
 735	rcar_free_domains(&host->msi);
 736}
 737
 738static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 739{
 740	struct rcar_pcie *pcie = &host->pcie;
 741	struct device *dev = pcie->dev;
 742	struct resource res;
 743	int err, i;
 744
 745	host->phy = devm_phy_optional_get(dev, "pcie");
 746	if (IS_ERR(host->phy))
 747		return PTR_ERR(host->phy);
 748
 749	err = of_address_to_resource(dev->of_node, 0, &res);
 750	if (err)
 751		return err;
 752
 753	pcie->base = devm_ioremap_resource(dev, &res);
 754	if (IS_ERR(pcie->base))
 755		return PTR_ERR(pcie->base);
 756
 757	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 758	if (IS_ERR(host->bus_clk)) {
 759		dev_err(dev, "cannot get pcie bus clock\n");
 760		return PTR_ERR(host->bus_clk);
 761	}
 762
 763	i = irq_of_parse_and_map(dev->of_node, 0);
 764	if (!i) {
 765		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 766		err = -ENOENT;
 767		goto err_irq1;
 768	}
 769	host->msi.irq1 = i;
 770
 771	i = irq_of_parse_and_map(dev->of_node, 1);
 772	if (!i) {
 773		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 774		err = -ENOENT;
 775		goto err_irq2;
 776	}
 777	host->msi.irq2 = i;
 778
 779	return 0;
 780
 781err_irq2:
 782	irq_dispose_mapping(host->msi.irq1);
 783err_irq1:
 784	return err;
 785}
 786
 787static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 788				    struct resource_entry *entry,
 789				    int *index)
 790{
 791	u64 restype = entry->res->flags;
 792	u64 cpu_addr = entry->res->start;
 793	u64 cpu_end = entry->res->end;
 794	u64 pci_addr = entry->res->start - entry->offset;
 795	u32 flags = LAM_64BIT | LAR_ENABLE;
 796	u64 mask;
 797	u64 size = resource_size(entry->res);
 798	int idx = *index;
 799
 800	if (restype & IORESOURCE_PREFETCH)
 801		flags |= LAM_PREFETCH;
 802
 803	while (cpu_addr < cpu_end) {
 804		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 805			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 806			return -EINVAL;
 807		}
 808		/*
 809		 * If the size of the range is larger than the alignment of
 810		 * the start address, we have to use multiple entries to
 811		 * perform the mapping.
 812		 */
 813		if (cpu_addr > 0) {
 814			unsigned long nr_zeros = __ffs64(cpu_addr);
 815			u64 alignment = 1ULL << nr_zeros;
 816
 817			size = min(size, alignment);
 818		}
 819		/* Hardware supports max 4GiB inbound region */
 820		size = min(size, 1ULL << 32);
 821
 822		mask = roundup_pow_of_two(size) - 1;
 823		mask &= ~0xf;
 824
 825		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 826				      lower_32_bits(mask) | flags, idx, true);
 827
 828		pci_addr += size;
 829		cpu_addr += size;
 830		idx += 2;
 831	}
 832	*index = idx;
 833
 834	return 0;
 835}
 836
 837static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 838{
 839	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 840	struct resource_entry *entry;
 841	int index = 0, err = 0;
 842
 843	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 844		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 845		if (err)
 846			break;
 847	}
 848
 849	return err;
 850}
 851
 852static const struct of_device_id rcar_pcie_of_match[] = {
 853	{ .compatible = "renesas,pcie-r8a7779",
 854	  .data = rcar_pcie_phy_init_h1 },
 855	{ .compatible = "renesas,pcie-r8a7790",
 856	  .data = rcar_pcie_phy_init_gen2 },
 857	{ .compatible = "renesas,pcie-r8a7791",
 858	  .data = rcar_pcie_phy_init_gen2 },
 859	{ .compatible = "renesas,pcie-rcar-gen2",
 860	  .data = rcar_pcie_phy_init_gen2 },
 861	{ .compatible = "renesas,pcie-r8a7795",
 862	  .data = rcar_pcie_phy_init_gen3 },
 863	{ .compatible = "renesas,pcie-rcar-gen3",
 864	  .data = rcar_pcie_phy_init_gen3 },
 865	{},
 866};
 867
 868static int rcar_pcie_probe(struct platform_device *pdev)
 869{
 870	struct device *dev = &pdev->dev;
 871	struct rcar_pcie_host *host;
 872	struct rcar_pcie *pcie;
 873	u32 data;
 874	int err;
 875	struct pci_host_bridge *bridge;
 876
 877	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 878	if (!bridge)
 879		return -ENOMEM;
 880
 881	host = pci_host_bridge_priv(bridge);
 882	pcie = &host->pcie;
 883	pcie->dev = dev;
 884	platform_set_drvdata(pdev, host);
 885
 886	pm_runtime_enable(pcie->dev);
 887	err = pm_runtime_get_sync(pcie->dev);
 888	if (err < 0) {
 889		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 890		goto err_pm_put;
 891	}
 892
 893	err = rcar_pcie_get_resources(host);
 894	if (err < 0) {
 895		dev_err(dev, "failed to request resources: %d\n", err);
 896		goto err_pm_put;
 897	}
 898
 899	err = clk_prepare_enable(host->bus_clk);
 900	if (err) {
 901		dev_err(dev, "failed to enable bus clock: %d\n", err);
 902		goto err_unmap_msi_irqs;
 903	}
 904
 905	err = rcar_pcie_parse_map_dma_ranges(host);
 906	if (err)
 907		goto err_clk_disable;
 908
 909	host->phy_init_fn = of_device_get_match_data(dev);
 910	err = host->phy_init_fn(host);
 911	if (err) {
 912		dev_err(dev, "failed to init PCIe PHY\n");
 913		goto err_clk_disable;
 914	}
 915
 916	/* Failure to get a link might just be that no cards are inserted */
 917	if (rcar_pcie_hw_init(pcie)) {
 918		dev_info(dev, "PCIe link down\n");
 919		err = -ENODEV;
 920		goto err_phy_shutdown;
 921	}
 922
 923	data = rcar_pci_read_reg(pcie, MACSR);
 924	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 925
 926	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 927		err = rcar_pcie_enable_msi(host);
 928		if (err < 0) {
 929			dev_err(dev,
 930				"failed to enable MSI support: %d\n",
 931				err);
 932			goto err_phy_shutdown;
 933		}
 934	}
 935
 936	err = rcar_pcie_enable(host);
 937	if (err)
 938		goto err_msi_teardown;
 939
 940	return 0;
 941
 942err_msi_teardown:
 943	if (IS_ENABLED(CONFIG_PCI_MSI))
 944		rcar_pcie_teardown_msi(host);
 945
 946err_phy_shutdown:
 947	if (host->phy) {
 948		phy_power_off(host->phy);
 949		phy_exit(host->phy);
 950	}
 951
 952err_clk_disable:
 953	clk_disable_unprepare(host->bus_clk);
 954
 955err_unmap_msi_irqs:
 956	irq_dispose_mapping(host->msi.irq2);
 957	irq_dispose_mapping(host->msi.irq1);
 958
 959err_pm_put:
 960	pm_runtime_put(dev);
 961	pm_runtime_disable(dev);
 962
 963	return err;
 964}
 965
 966static int __maybe_unused rcar_pcie_resume(struct device *dev)
 967{
 968	struct rcar_pcie_host *host = dev_get_drvdata(dev);
 969	struct rcar_pcie *pcie = &host->pcie;
 970	unsigned int data;
 971	int err;
 972
 973	err = rcar_pcie_parse_map_dma_ranges(host);
 974	if (err)
 975		return 0;
 976
 977	/* Failure to get a link might just be that no cards are inserted */
 978	err = host->phy_init_fn(host);
 979	if (err) {
 980		dev_info(dev, "PCIe link down\n");
 981		return 0;
 982	}
 983
 984	data = rcar_pci_read_reg(pcie, MACSR);
 985	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 986
 987	/* Enable MSI */
 988	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 989		struct resource res;
 990		u32 val;
 991
 992		of_address_to_resource(dev->of_node, 0, &res);
 993		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 994		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 995
 996		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
 997		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
 998	}
 999
1000	rcar_pcie_hw_enable(host);
1001
1002	return 0;
1003}
1004
1005static int rcar_pcie_resume_noirq(struct device *dev)
1006{
1007	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1008	struct rcar_pcie *pcie = &host->pcie;
1009
1010	if (rcar_pci_read_reg(pcie, PMSR) &&
1011	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1012		return 0;
1013
1014	/* Re-establish the PCIe link */
1015	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1016	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1017	return rcar_pcie_wait_for_dl(pcie);
1018}
1019
1020static const struct dev_pm_ops rcar_pcie_pm_ops = {
1021	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1022	.resume_noirq = rcar_pcie_resume_noirq,
1023};
1024
1025static struct platform_driver rcar_pcie_driver = {
1026	.driver = {
1027		.name = "rcar-pcie",
1028		.of_match_table = rcar_pcie_of_match,
1029		.pm = &rcar_pcie_pm_ops,
1030		.suppress_bind_attrs = true,
1031	},
1032	.probe = rcar_pcie_probe,
1033};
1034builtin_platform_driver(rcar_pcie_driver);