v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/clk-provider.h>
  17#include <linux/delay.h>
  18#include <linux/interrupt.h>
  19#include <linux/irq.h>
  20#include <linux/irqdomain.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/iopoll.h>
  24#include <linux/msi.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_platform.h>
  28#include <linux/pci.h>
  29#include <linux/phy/phy.h>
  30#include <linux/platform_device.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/regulator/consumer.h>
  33
  34#include "pcie-rcar.h"
  35
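/*
 * Per-controller MSI bookkeeping: a bitmap of the INT_PCI_MSI_NR vectors the
 * controller can latch, the hierarchical IRQ domain built on top of them,
 * and the two parent interrupt lines on which MSI status is signalled.
 */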
  36struct rcar_msi {
  37	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  38	struct irq_domain *domain;
  39	struct mutex map_lock;
  40	spinlock_t mask_lock;
  41	int irq1;
  42	int irq2;
  43};
  44
  45/* Structure representing the PCIe interface */
  46struct rcar_pcie_host {
  47	struct rcar_pcie	pcie;
  48	struct phy		*phy;
  49	struct clk		*bus_clk;
  50	struct			rcar_msi msi;
  51	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  52};
  53
  54static DEFINE_SPINLOCK(pmsr_lock);
  55
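/*
 * Called before every config access: if the controller has received a
 * PM_ENTER_L1 DLLP but never completed the transition, finish the L1 entry
 * by hand (L1IATN) so the link can return to L0s/L0 on its own before the
 * caller touches config space.
 */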
  56static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
  57{
  58	unsigned long flags;
  59	u32 pmsr, val;
  60	int ret = 0;
  61
  62	spin_lock_irqsave(&pmsr_lock, flags);
  63
  64	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
  65		ret = -EINVAL;
  66		goto unlock_exit;
  67	}
  68
  69	pmsr = readl(pcie_base + PMSR);
  70
  71	/*
  72	 * Test if the PCIe controller received PM_ENTER_L1 DLLP and
  73	 * the PCIe controller is not in L1 link state. If true, apply
  74	 * fix, which will put the controller into L1 link state, from
  75	 * which it can return to L0s/L0 on its own.
  76	 */
  77	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
  78		writel(L1IATN, pcie_base + PMCTLR);
  79		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
  80						val & L1FAEG, 10, 1000);
  81		WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
  82		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
  83	}
  84
  85unlock_exit:
  86	spin_unlock_irqrestore(&pmsr_lock, flags);
  87	return ret;
  88}
  89
  90static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
  91{
  92	return container_of(msi, struct rcar_pcie_host, msi);
  93}
  94
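/*
 * Read the aligned 32-bit register that contains 'where' and shift the
 * addressed byte down to bit 0; callers mask the result to the access size.
 */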
  95static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
  96{
  97	unsigned int shift = BITS_PER_BYTE * (where & 3);
  98	u32 val = rcar_pci_read_reg(pcie, where & ~3);
  99
 100	return val >> shift;
 101}
 102
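/*
 * On 32-bit ARM a config access that the controller aborts is delivered as
 * an external abort. Wrap the single load/store in an exception-table fixup
 * so the abort handler hooked in rcar_pcie_init() can recover and the access
 * returns PCIBIOS_SET_FAILED instead of crashing.
 */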
 103#ifdef CONFIG_ARM
 104#define __rcar_pci_rw_reg_workaround(instr)				\
 105		"	.arch armv7-a\n"				\
 106		"1:	" instr " %1, [%2]\n"				\
 107		"2:	isb\n"						\
 108		"3:	.pushsection .text.fixup,\"ax\"\n"		\
 109		"	.align	2\n"					\
 110		"4:	mov	%0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
 111		"	b	3b\n"					\
 112		"	.popsection\n"					\
 113		"	.pushsection __ex_table,\"a\"\n"		\
 114		"	.align	3\n"					\
 115		"	.long	1b, 4b\n"				\
 116		"	.long	2b, 4b\n"				\
 117		"	.popsection\n"
 118#endif
 119
 120static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
 121					 unsigned int reg)
 122{
 123	int error = PCIBIOS_SUCCESSFUL;
 124#ifdef CONFIG_ARM
 125	asm volatile(
 126		__rcar_pci_rw_reg_workaround("str")
 127	: "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
 128#else
 129	rcar_pci_write_reg(pcie, val, reg);
 130#endif
 131	return error;
 132}
 133
 134static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
 135					unsigned int reg)
 136{
 137	int error = PCIBIOS_SUCCESSFUL;
 138#ifdef CONFIG_ARM
 139	asm volatile(
 140		__rcar_pci_rw_reg_workaround("ldr")
 141	: "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
 142
 143	if (error != PCIBIOS_SUCCESSFUL)
 144		PCI_SET_ERROR_RESPONSE(val);
 145#else
 146	*val = rcar_pci_read_reg(pcie, reg);
 147#endif
 148	return error;
 149}
 150
 151/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 152static int rcar_pcie_config_access(struct rcar_pcie_host *host,
 153		unsigned char access_type, struct pci_bus *bus,
 154		unsigned int devfn, int where, u32 *data)
 155{
 156	struct rcar_pcie *pcie = &host->pcie;
 157	unsigned int dev, func, reg, index;
 158	int ret;
 159
 160	/* Wake the bus up in case it is in L1 state. */
 161	ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
 162	if (ret) {
 163		PCI_SET_ERROR_RESPONSE(data);
 164		return PCIBIOS_SET_FAILED;
 165	}
 166
 167	dev = PCI_SLOT(devfn);
 168	func = PCI_FUNC(devfn);
 169	reg = where & ~3;
 170	index = reg / 4;
 171
 172	/*
 173	 * While each channel has its own memory-mapped extended config
 174	 * space, it's generally only accessible when in endpoint mode.
 175	 * When in root complex mode, the controller is unable to target
  176	 * itself with either type 0 or type 1 accesses, and indeed, any
  177	 * controller-initiated target transfer to its own config space
  178	 * results in a completer abort.
 179	 *
 180	 * Each channel effectively only supports a single device, but as
 181	 * the same channel <-> device access works for any PCI_SLOT()
 182	 * value, we cheat a bit here and bind the controller's config
 183	 * space to devfn 0 in order to enable self-enumeration. In this
 184	 * case the regular ECAR/ECDR path is sidelined and the mangled
 185	 * config access itself is initiated as an internal bus transaction.
 186	 */
 187	if (pci_is_root_bus(bus)) {
 188		if (dev != 0)
 189			return PCIBIOS_DEVICE_NOT_FOUND;
 190
 191		if (access_type == RCAR_PCI_ACCESS_READ)
 192			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 193		else
 194			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 195
 196		return PCIBIOS_SUCCESSFUL;
 197	}
 198
 199	/* Clear errors */
 200	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 201
 202	/* Set the PIO address */
 203	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 204		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 205
 206	/* Enable the configuration access */
  207	if (pci_is_root_bus(bus->parent))
  208		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
  209	else
  210		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 211
 212	/* Check for errors */
 213	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 214		return PCIBIOS_DEVICE_NOT_FOUND;
 215
 216	/* Check for master and target aborts */
 217	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 218		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 219		return PCIBIOS_DEVICE_NOT_FOUND;
 220
 221	if (access_type == RCAR_PCI_ACCESS_READ)
 222		ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
 223	else
 224		ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
 225
 226	/* Disable the configuration access */
 227	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 228
 229	return ret;
 230}
 231
 232static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 233			       int where, int size, u32 *val)
 234{
 235	struct rcar_pcie_host *host = bus->sysdata;
 236	int ret;
 237
 238	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 239				      bus, devfn, where, val);
 240	if (ret != PCIBIOS_SUCCESSFUL)
 241		return ret;
 242
 243	if (size == 1)
 244		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 245	else if (size == 2)
 246		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 247
 248	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 249		bus->number, devfn, where, size, *val);
 250
 251	return ret;
 252}
 253
 254/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 255static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 256				int where, int size, u32 val)
 257{
 258	struct rcar_pcie_host *host = bus->sysdata;
 259	unsigned int shift;
 260	u32 data;
 261	int ret;
 262
 263	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 264				      bus, devfn, where, &data);
 265	if (ret != PCIBIOS_SUCCESSFUL)
 266		return ret;
 267
 268	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 269		bus->number, devfn, where, size, val);
 270
 271	if (size == 1) {
 272		shift = BITS_PER_BYTE * (where & 3);
 273		data &= ~(0xff << shift);
 274		data |= ((val & 0xff) << shift);
 275	} else if (size == 2) {
 276		shift = BITS_PER_BYTE * (where & 2);
 277		data &= ~(0xffff << shift);
 278		data |= ((val & 0xffff) << shift);
 279	} else
 280		data = val;
 281
 282	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 283				      bus, devfn, where, &data);
 284
 285	return ret;
 286}
 287
 288static struct pci_ops rcar_pcie_ops = {
 289	.read	= rcar_pcie_read_conf,
 290	.write	= rcar_pcie_write_conf,
 291};
 292
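/*
 * If the hardware supports 5 GT/s but the link trained at 2.5 GT/s, request
 * a speed change and wait up to roughly a second for it to complete.
 */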
 293static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 294{
 295	struct device *dev = pcie->dev;
 296	unsigned int timeout = 1000;
 297	u32 macsr;
 298
 299	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 300		return;
 301
 302	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 303		dev_err(dev, "Speed change already in progress\n");
 304		return;
 305	}
 306
 307	macsr = rcar_pci_read_reg(pcie, MACSR);
 308	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 309		goto done;
 310
 311	/* Set target link speed to 5.0 GT/s */
 312	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 313		   PCI_EXP_LNKSTA_CLS_5_0GB);
 314
 315	/* Set speed change reason as intentional factor */
 316	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 317
 318	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 319	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 320		rcar_pci_write_reg(pcie, macsr, MACSR);
 321
 322	/* Start link speed change */
 323	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 324
 325	while (timeout--) {
 326		macsr = rcar_pci_read_reg(pcie, MACSR);
 327		if (macsr & SPCHGFIN) {
 328			/* Clear the interrupt bits */
 329			rcar_pci_write_reg(pcie, macsr, MACSR);
 330
 331			if (macsr & SPCHGFAIL)
 332				dev_err(dev, "Speed change failed\n");
 333
 334			goto done;
 335		}
 336
 337		msleep(1);
 338	}
 339
 340	dev_err(dev, "Speed change timed out\n");
 341
 342done:
 343	dev_info(dev, "Current link speed is %s GT/s\n",
 344		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 345}
 346
 347static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 348{
 349	struct rcar_pcie *pcie = &host->pcie;
 350	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 351	struct resource_entry *win;
 352	LIST_HEAD(res);
 353	int i = 0;
 354
 355	/* Try setting 5 GT/s link speed */
 356	rcar_pcie_force_speedup(pcie);
 357
 358	/* Setup PCI resources */
 359	resource_list_for_each_entry(win, &bridge->windows) {
 360		struct resource *res = win->res;
 361
 362		if (!res->flags)
 363			continue;
 364
 365		switch (resource_type(res)) {
 366		case IORESOURCE_IO:
 367		case IORESOURCE_MEM:
 368			rcar_pcie_set_outbound(pcie, i, win);
 369			i++;
 370			break;
 371		}
 372	}
 373}
 374
 375static int rcar_pcie_enable(struct rcar_pcie_host *host)
 376{
 377	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 378
 379	rcar_pcie_hw_enable(host);
 380
 381	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 382
 383	bridge->sysdata = host;
 384	bridge->ops = &rcar_pcie_ops;
 385
 386	return pci_host_probe(bridge);
 387}
 388
 389static int phy_wait_for_ack(struct rcar_pcie *pcie)
 390{
 391	struct device *dev = pcie->dev;
 392	unsigned int timeout = 100;
 393
 394	while (timeout--) {
 395		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 396			return 0;
 397
 398		udelay(100);
 399	}
 400
 401	dev_err(dev, "Access to PCIe phy timed out\n");
 402
 403	return -ETIMEDOUT;
 404}
 405
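/*
 * Write one R-Car H1 PHY register through the indirect access window: rate,
 * lane and register address are encoded into H1_PCIEPHYADRR along with
 * WRITE_CMD, and the payload goes through H1_PCIEPHYDOUTR.
 */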
 406static void phy_write_reg(struct rcar_pcie *pcie,
 407			  unsigned int rate, u32 addr,
 408			  unsigned int lane, u32 data)
 409{
 410	u32 phyaddr;
 411
 412	phyaddr = WRITE_CMD |
 413		((rate & 1) << RATE_POS) |
 414		((lane & 0xf) << LANE_POS) |
 415		((addr & 0xff) << ADR_POS);
 416
 417	/* Set write data */
 418	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 419	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 420
 421	/* Ignore errors as they will be dealt with if the data link is down */
 422	phy_wait_for_ack(pcie);
 423
 424	/* Clear command */
 425	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 426	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 427
 428	/* Ignore errors as they will be dealt with if the data link is down */
 429	phy_wait_for_ack(pcie);
 430}
 431
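/*
 * Bring up the controller: reset it, set the operating mode, wait for the
 * PHY, populate the root port's config-space defaults and train the link.
 * Fails if the PHY never becomes ready or no link partner responds.
 */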
 432static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 433{
 434	int err;
 435
 436	/* Begin initialization */
 437	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 438
 439	/* Set mode */
 440	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 441
 442	err = rcar_pcie_wait_for_phyrdy(pcie);
 443	if (err)
 444		return err;
 445
 446	/*
 447	 * Initial header for port config space is type 1, set the device
 448	 * class to match. Hardware takes care of propagating the IDSETR
 449	 * settings, so there is no need to bother with a quirk.
 450	 */
 451	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
 452
 453	/*
 454	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
 455	 * they aren't used, to avoid bridge being detected as broken.
 456	 */
 457	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 458	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 459
 460	/* Initialize default capabilities. */
 461	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 462	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 463		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 464	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
 465		PCI_HEADER_TYPE_BRIDGE);
 466
 467	/* Enable data link layer active state reporting */
 468	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 469		PCI_EXP_LNKCAP_DLLLARC);
 470
 471	/* Write out the physical slot number = 0 */
 472	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 473
 474	/* Set the completion timer timeout to the maximum 50ms. */
 475	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 476
 477	/* Terminate list of capabilities (Next Capability Offset=0) */
 478	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 479
 480	/* Enable MSI */
 481	if (IS_ENABLED(CONFIG_PCI_MSI))
 482		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 483
 484	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 485
 486	/* Finish initialization - establish a PCI Express link */
 487	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 488
 489	/* This will timeout if we don't have a link. */
 490	err = rcar_pcie_wait_for_dl(pcie);
 491	if (err)
 492		return err;
 493
 494	/* Enable INTx interrupts */
 495	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 496
 497	wmb();
 498
 499	return 0;
 500}
 501
 502static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 503{
 504	struct rcar_pcie *pcie = &host->pcie;
 505
 506	/* Initialize the phy */
 507	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 508	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 509	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 510	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 511	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 512	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 513	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 514	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 515	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 516	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 517	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 518	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 519
 520	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 521	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 522	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 523
 524	return 0;
 525}
 526
 527static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 528{
 529	struct rcar_pcie *pcie = &host->pcie;
 530
 531	/*
 532	 * These settings come from the R-Car Series, 2nd Generation User's
 533	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 534	 */
 535	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 536	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 537	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 538	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 539
 540	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 541	/* The following value is for DC connection, no termination resistor */
 542	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 543	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 544	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 545
 546	return 0;
 547}
 548
 549static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 550{
 551	int err;
 552
 553	err = phy_init(host->phy);
 554	if (err)
 555		return err;
 556
 557	err = phy_power_on(host->phy);
 558	if (err)
 559		phy_exit(host->phy);
 560
 561	return err;
 562}
 563
 564static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 565{
 566	struct rcar_pcie_host *host = data;
 567	struct rcar_pcie *pcie = &host->pcie;
 568	struct rcar_msi *msi = &host->msi;
 569	struct device *dev = pcie->dev;
 570	unsigned long reg;
 571
 572	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 573
 574	/* MSI & INTx share an interrupt - we only handle MSI here */
 575	if (!reg)
 576		return IRQ_NONE;
 577
 578	while (reg) {
 579		unsigned int index = find_first_bit(&reg, 32);
 580		int ret;
 581
 582		ret = generic_handle_domain_irq(msi->domain->parent, index);
 583		if (ret) {
 584			/* Unknown MSI, just clear it */
 585			dev_dbg(dev, "unexpected MSI\n");
 586			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 587		}
 588
 589		/* see if there's any more pending in this vector */
 590		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 591	}
 592
 593	return IRQ_HANDLED;
 594}
 595
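/*
 * Two-level MSI irqchip: the "bottom" chip further down drives the
 * controller's own capture/enable registers (PCIEMSIFR/PCIEMSIIER) per
 * hwirq, while this "top" chip is the generic PCI/MSI layer stacked on the
 * domain created in rcar_allocate_domains().
 */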
 596static void rcar_msi_top_irq_ack(struct irq_data *d)
 597{
 598	irq_chip_ack_parent(d);
 599}
 600
 601static void rcar_msi_top_irq_mask(struct irq_data *d)
 602{
 603	pci_msi_mask_irq(d);
 604	irq_chip_mask_parent(d);
 605}
 606
 607static void rcar_msi_top_irq_unmask(struct irq_data *d)
 608{
 609	pci_msi_unmask_irq(d);
 610	irq_chip_unmask_parent(d);
 611}
 612
 613static struct irq_chip rcar_msi_top_chip = {
 614	.name		= "PCIe MSI",
 615	.irq_ack	= rcar_msi_top_irq_ack,
 616	.irq_mask	= rcar_msi_top_irq_mask,
 617	.irq_unmask	= rcar_msi_top_irq_unmask,
 618};
 619
 620static void rcar_msi_irq_ack(struct irq_data *d)
 621{
 622	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 623	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 624
 625	/* clear the interrupt */
 626	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 627}
 628
 629static void rcar_msi_irq_mask(struct irq_data *d)
 630{
 631	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 632	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 633	unsigned long flags;
 634	u32 value;
 635
 636	spin_lock_irqsave(&msi->mask_lock, flags);
 637	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 638	value &= ~BIT(d->hwirq);
 639	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 640	spin_unlock_irqrestore(&msi->mask_lock, flags);
 641}
 642
 643static void rcar_msi_irq_unmask(struct irq_data *d)
 644{
 645	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 646	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 647	unsigned long flags;
 648	u32 value;
 649
 650	spin_lock_irqsave(&msi->mask_lock, flags);
 651	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 652	value |= BIT(d->hwirq);
 653	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 654	spin_unlock_irqrestore(&msi->mask_lock, flags);
 655}
 656
 657static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
 658{
 659	return -EINVAL;
 660}
 661
 662static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 663{
 664	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 665	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 666
 667	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 668	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 669	msg->data = data->hwirq;
 670}
 671
 672static struct irq_chip rcar_msi_bottom_chip = {
 673	.name			= "R-Car MSI",
 674	.irq_ack		= rcar_msi_irq_ack,
 675	.irq_mask		= rcar_msi_irq_mask,
 676	.irq_unmask		= rcar_msi_irq_unmask,
 677	.irq_set_affinity 	= rcar_msi_set_affinity,
 678	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 679};
 680
 681static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 682				  unsigned int nr_irqs, void *args)
 683{
 684	struct rcar_msi *msi = domain->host_data;
 685	unsigned int i;
 686	int hwirq;
 687
 688	mutex_lock(&msi->map_lock);
 689
 690	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 691
 692	mutex_unlock(&msi->map_lock);
 693
 694	if (hwirq < 0)
 695		return -ENOSPC;
 696
 697	for (i = 0; i < nr_irqs; i++)
 698		irq_domain_set_info(domain, virq + i, hwirq + i,
 699				    &rcar_msi_bottom_chip, domain->host_data,
 700				    handle_edge_irq, NULL, NULL);
 701
 702	return 0;
 703}
 704
 705static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 706				  unsigned int nr_irqs)
 707{
 708	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 709	struct rcar_msi *msi = domain->host_data;
 710
 711	mutex_lock(&msi->map_lock);
 712
 713	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 714
 715	mutex_unlock(&msi->map_lock);
 716}
 717
 718static const struct irq_domain_ops rcar_msi_domain_ops = {
 719	.alloc	= rcar_msi_domain_alloc,
 720	.free	= rcar_msi_domain_free,
 721};
 722
 723static struct msi_domain_info rcar_msi_info = {
 724	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 725		   MSI_FLAG_MULTI_PCI_MSI),
 726	.chip	= &rcar_msi_top_chip,
 727};
 728
 729static int rcar_allocate_domains(struct rcar_msi *msi)
 730{
 731	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 732	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 733	struct irq_domain *parent;
 734
 735	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 736					  &rcar_msi_domain_ops, msi);
 737	if (!parent) {
 738		dev_err(pcie->dev, "failed to create IRQ domain\n");
 739		return -ENOMEM;
 740	}
 741	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 742
 743	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 744	if (!msi->domain) {
 745		dev_err(pcie->dev, "failed to create MSI domain\n");
 746		irq_domain_remove(parent);
 747		return -ENOMEM;
 748	}
 749
 750	return 0;
 751}
 752
 753static void rcar_free_domains(struct rcar_msi *msi)
 754{
 755	struct irq_domain *parent = msi->domain->parent;
 756
 757	irq_domain_remove(msi->domain);
 758	irq_domain_remove(parent);
 759}
 760
 761static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 762{
 763	struct rcar_pcie *pcie = &host->pcie;
 764	struct device *dev = pcie->dev;
 765	struct rcar_msi *msi = &host->msi;
 766	struct resource res;
 767	int err;
 768
 769	mutex_init(&msi->map_lock);
 770	spin_lock_init(&msi->mask_lock);
 771
 772	err = of_address_to_resource(dev->of_node, 0, &res);
 773	if (err)
 774		return err;
 775
 776	err = rcar_allocate_domains(msi);
 777	if (err)
 778		return err;
 779
 780	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 781	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 782			       IRQF_SHARED | IRQF_NO_THREAD,
 783			       rcar_msi_bottom_chip.name, host);
 784	if (err < 0) {
 785		dev_err(dev, "failed to request IRQ: %d\n", err);
 786		goto err;
 787	}
 788
 789	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 790			       IRQF_SHARED | IRQF_NO_THREAD,
 791			       rcar_msi_bottom_chip.name, host);
 792	if (err < 0) {
 793		dev_err(dev, "failed to request IRQ: %d\n", err);
 794		goto err;
 795	}
 796
 797	/* disable all MSIs */
 798	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 799
 800	/*
  801	 * Setup MSI data target using the RC base address, which
 802	 * is guaranteed to be in the low 32bit range on any R-Car HW.
 803	 */
 804	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 805	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 806
 807	return 0;
 808
 809err:
 810	rcar_free_domains(msi);
 811	return err;
 812}
 813
 814static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 815{
 816	struct rcar_pcie *pcie = &host->pcie;
 817
 818	/* Disable all MSI interrupts */
 819	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 820
 821	/* Disable address decoding of the MSI interrupt, MSIFE */
 822	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 823
 824	rcar_free_domains(&host->msi);
 825}
 826
 827static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 828{
 829	struct rcar_pcie *pcie = &host->pcie;
 830	struct device *dev = pcie->dev;
 831	struct resource res;
 832	int err, i;
 833
 834	host->phy = devm_phy_optional_get(dev, "pcie");
 835	if (IS_ERR(host->phy))
 836		return PTR_ERR(host->phy);
 837
 838	err = of_address_to_resource(dev->of_node, 0, &res);
 839	if (err)
 840		return err;
 841
 842	pcie->base = devm_ioremap_resource(dev, &res);
 843	if (IS_ERR(pcie->base))
 844		return PTR_ERR(pcie->base);
 845
 846	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 847	if (IS_ERR(host->bus_clk)) {
 848		dev_err(dev, "cannot get pcie bus clock\n");
 849		return PTR_ERR(host->bus_clk);
 850	}
 851
 852	i = irq_of_parse_and_map(dev->of_node, 0);
 853	if (!i) {
 854		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 855		err = -ENOENT;
 856		goto err_irq1;
 857	}
 858	host->msi.irq1 = i;
 859
 860	i = irq_of_parse_and_map(dev->of_node, 1);
 861	if (!i) {
 862		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 863		err = -ENOENT;
 864		goto err_irq2;
 865	}
 866	host->msi.irq2 = i;
 867
 868	return 0;
 869
 870err_irq2:
 871	irq_dispose_mapping(host->msi.irq1);
 872err_irq1:
 873	return err;
 874}
 875
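/*
 * Program inbound (PCI -> CPU) translation windows for one dma-range entry.
 * Each window must be a power of two in size, no larger than 4 GiB and
 * aligned to its base address, so large or unaligned ranges are split into
 * several windows; every window consumes two slots, hence idx += 2.
 */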
 876static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 877				    struct resource_entry *entry,
 878				    int *index)
 879{
 880	u64 restype = entry->res->flags;
 881	u64 cpu_addr = entry->res->start;
 882	u64 cpu_end = entry->res->end;
 883	u64 pci_addr = entry->res->start - entry->offset;
 884	u32 flags = LAM_64BIT | LAR_ENABLE;
 885	u64 mask;
 886	u64 size = resource_size(entry->res);
 887	int idx = *index;
 888
 889	if (restype & IORESOURCE_PREFETCH)
 890		flags |= LAM_PREFETCH;
 891
 892	while (cpu_addr < cpu_end) {
 893		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 894			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 895			return -EINVAL;
 896		}
 897		/*
 898		 * If the size of the range is larger than the alignment of
 899		 * the start address, we have to use multiple entries to
 900		 * perform the mapping.
 901		 */
 902		if (cpu_addr > 0) {
 903			unsigned long nr_zeros = __ffs64(cpu_addr);
 904			u64 alignment = 1ULL << nr_zeros;
 905
 906			size = min(size, alignment);
 907		}
 908		/* Hardware supports max 4GiB inbound region */
 909		size = min(size, 1ULL << 32);
 910
 911		mask = roundup_pow_of_two(size) - 1;
 912		mask &= ~0xf;
 913
 914		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 915				      lower_32_bits(mask) | flags, idx, true);
 916
 917		pci_addr += size;
 918		cpu_addr += size;
 919		idx += 2;
 920	}
 921	*index = idx;
 922
 923	return 0;
 924}
 925
 926static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 927{
 928	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 929	struct resource_entry *entry;
 930	int index = 0, err = 0;
 931
 932	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 933		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 934		if (err)
 935			break;
 936	}
 937
 938	return err;
 939}
 940
 941static const struct of_device_id rcar_pcie_of_match[] = {
 942	{ .compatible = "renesas,pcie-r8a7779",
 943	  .data = rcar_pcie_phy_init_h1 },
 944	{ .compatible = "renesas,pcie-r8a7790",
 945	  .data = rcar_pcie_phy_init_gen2 },
 946	{ .compatible = "renesas,pcie-r8a7791",
 947	  .data = rcar_pcie_phy_init_gen2 },
 948	{ .compatible = "renesas,pcie-rcar-gen2",
 949	  .data = rcar_pcie_phy_init_gen2 },
 950	{ .compatible = "renesas,pcie-r8a7795",
 951	  .data = rcar_pcie_phy_init_gen3 },
 952	{ .compatible = "renesas,pcie-rcar-gen3",
 953	  .data = rcar_pcie_phy_init_gen3 },
 954	{},
 955};
 956
  957/* The optional supplies are independent; the order they are enabled in does not matter. */
 958static const char * const rcar_pcie_supplies[] = {
 959	"vpcie1v5",
 960	"vpcie3v3",
 961	"vpcie12v",
 962};
 963
 964static int rcar_pcie_probe(struct platform_device *pdev)
 965{
 966	struct device *dev = &pdev->dev;
 967	struct pci_host_bridge *bridge;
 968	struct rcar_pcie_host *host;
 969	struct rcar_pcie *pcie;
 970	unsigned int i;
 971	u32 data;
 972	int err;
 973
 974	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 975	if (!bridge)
 976		return -ENOMEM;
 977
 978	host = pci_host_bridge_priv(bridge);
 979	pcie = &host->pcie;
 980	pcie->dev = dev;
 981	platform_set_drvdata(pdev, host);
 982
 983	for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
 984		err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
 985		if (err < 0 && err != -ENODEV)
 986			return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
 987					     rcar_pcie_supplies[i]);
 988	}
 989
 990	pm_runtime_enable(pcie->dev);
 991	err = pm_runtime_get_sync(pcie->dev);
 992	if (err < 0) {
 993		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 994		goto err_pm_put;
 995	}
 996
 997	err = rcar_pcie_get_resources(host);
 998	if (err < 0) {
 999		dev_err(dev, "failed to request resources: %d\n", err);
1000		goto err_pm_put;
1001	}
1002
1003	err = clk_prepare_enable(host->bus_clk);
1004	if (err) {
1005		dev_err(dev, "failed to enable bus clock: %d\n", err);
1006		goto err_unmap_msi_irqs;
1007	}
1008
1009	err = rcar_pcie_parse_map_dma_ranges(host);
1010	if (err)
1011		goto err_clk_disable;
1012
1013	host->phy_init_fn = of_device_get_match_data(dev);
1014	err = host->phy_init_fn(host);
1015	if (err) {
1016		dev_err(dev, "failed to init PCIe PHY\n");
1017		goto err_clk_disable;
1018	}
1019
1020	/* Failure to get a link might just be that no cards are inserted */
1021	if (rcar_pcie_hw_init(pcie)) {
1022		dev_info(dev, "PCIe link down\n");
1023		err = -ENODEV;
1024		goto err_phy_shutdown;
1025	}
1026
1027	data = rcar_pci_read_reg(pcie, MACSR);
1028	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1029
1030	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1031		err = rcar_pcie_enable_msi(host);
1032		if (err < 0) {
1033			dev_err(dev,
1034				"failed to enable MSI support: %d\n",
1035				err);
1036			goto err_phy_shutdown;
1037		}
1038	}
1039
1040	err = rcar_pcie_enable(host);
1041	if (err)
1042		goto err_msi_teardown;
1043
1044	return 0;
1045
1046err_msi_teardown:
1047	if (IS_ENABLED(CONFIG_PCI_MSI))
1048		rcar_pcie_teardown_msi(host);
1049
1050err_phy_shutdown:
1051	if (host->phy) {
1052		phy_power_off(host->phy);
1053		phy_exit(host->phy);
1054	}
1055
1056err_clk_disable:
1057	clk_disable_unprepare(host->bus_clk);
1058
1059err_unmap_msi_irqs:
1060	irq_dispose_mapping(host->msi.irq2);
1061	irq_dispose_mapping(host->msi.irq1);
1062
1063err_pm_put:
1064	pm_runtime_put(dev);
1065	pm_runtime_disable(dev);
1066
1067	return err;
1068}
1069
1070static int rcar_pcie_resume(struct device *dev)
1071{
1072	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1073	struct rcar_pcie *pcie = &host->pcie;
1074	unsigned int data;
1075	int err;
1076
1077	err = rcar_pcie_parse_map_dma_ranges(host);
1078	if (err)
1079		return 0;
1080
1081	/* Failure to get a link might just be that no cards are inserted */
1082	err = host->phy_init_fn(host);
1083	if (err) {
1084		dev_info(dev, "PCIe link down\n");
1085		return 0;
1086	}
1087
1088	data = rcar_pci_read_reg(pcie, MACSR);
1089	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1090
1091	/* Enable MSI */
1092	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1093		struct resource res;
1094		u32 val;
1095
1096		of_address_to_resource(dev->of_node, 0, &res);
1097		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
1098		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
1099
1100		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
1101		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
1102	}
1103
1104	rcar_pcie_hw_enable(host);
1105
1106	return 0;
1107}
1108
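/*
 * Early (noirq) resume: if the controller state was lost (PMSR reads zero)
 * or the data link is reported down, re-establish the link before dependent
 * devices resume and start issuing config accesses.
 */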
1109static int rcar_pcie_resume_noirq(struct device *dev)
1110{
1111	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1112	struct rcar_pcie *pcie = &host->pcie;
1113
1114	if (rcar_pci_read_reg(pcie, PMSR) &&
1115	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1116		return 0;
1117
1118	/* Re-establish the PCIe link */
1119	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1120	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1121	return rcar_pcie_wait_for_dl(pcie);
1122}
1123
1124static const struct dev_pm_ops rcar_pcie_pm_ops = {
1125	SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1126	.resume_noirq = rcar_pcie_resume_noirq,
1127};
1128
1129static struct platform_driver rcar_pcie_driver = {
1130	.driver = {
1131		.name = "rcar-pcie",
1132		.of_match_table = rcar_pcie_of_match,
1133		.pm = &rcar_pcie_pm_ops,
1134		.suppress_bind_attrs = true,
1135	},
1136	.probe = rcar_pcie_probe,
1137};
1138
1139#ifdef CONFIG_ARM
1140static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
1141		unsigned int fsr, struct pt_regs *regs)
1142{
1143	return !fixup_exception(regs);
1144}
1145
1146static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
1147	{ .compatible = "renesas,pcie-r8a7779" },
1148	{ .compatible = "renesas,pcie-r8a7790" },
1149	{ .compatible = "renesas,pcie-r8a7791" },
1150	{ .compatible = "renesas,pcie-rcar-gen2" },
1151	{},
1152};
1153
1154static int __init rcar_pcie_init(void)
1155{
1156	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
1157#ifdef CONFIG_ARM_LPAE
1158		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1159				"asynchronous external abort");
1160#else
1161		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1162				"imprecise external abort");
1163#endif
1164	}
1165
1166	return platform_driver_register(&rcar_pcie_driver);
1167}
1168device_initcall(rcar_pcie_init);
1169#else
1170builtin_platform_driver(rcar_pcie_driver);
1171#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/clk-provider.h>
  17#include <linux/delay.h>
  18#include <linux/interrupt.h>
  19#include <linux/irq.h>
  20#include <linux/irqdomain.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/iopoll.h>
  24#include <linux/msi.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_platform.h>
  28#include <linux/pci.h>
  29#include <linux/phy/phy.h>
  30#include <linux/platform_device.h>
  31#include <linux/pm_runtime.h>
 
  32
  33#include "pcie-rcar.h"
  34
  35struct rcar_msi {
  36	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  37	struct irq_domain *domain;
  38	struct mutex map_lock;
  39	spinlock_t mask_lock;
  40	int irq1;
  41	int irq2;
  42};
  43
  44#ifdef CONFIG_ARM
  45/*
  46 * Here we keep a static copy of the remapped PCIe controller address.
  47 * This is only used on aarch32 systems, all of which have one single
  48 * PCIe controller, to provide quick access to the PCIe controller in
  49 * the L1 link state fixup function, called from the ARM fault handler.
  50 */
  51static void __iomem *pcie_base;
  52/*
  53 * Static copy of PCIe device pointer, so we can check whether the
  54 * device is runtime suspended or not.
  55 */
  56static struct device *pcie_dev;
  57#endif
  58
  59/* Structure representing the PCIe interface */
  60struct rcar_pcie_host {
  61	struct rcar_pcie	pcie;
  62	struct phy		*phy;
  63	struct clk		*bus_clk;
  64	struct			rcar_msi msi;
  65	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  66};
  67
  68static DEFINE_SPINLOCK(pmsr_lock);
  69
  70static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
  71{
  72	unsigned long flags;
  73	u32 pmsr, val;
  74	int ret = 0;
  75
  76	spin_lock_irqsave(&pmsr_lock, flags);
  77
  78	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
  79		ret = -EINVAL;
  80		goto unlock_exit;
  81	}
  82
  83	pmsr = readl(pcie_base + PMSR);
  84
  85	/*
  86	 * Test if the PCIe controller received PM_ENTER_L1 DLLP and
  87	 * the PCIe controller is not in L1 link state. If true, apply
  88	 * fix, which will put the controller into L1 link state, from
  89	 * which it can return to L0s/L0 on its own.
  90	 */
  91	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
  92		writel(L1IATN, pcie_base + PMCTLR);
  93		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
  94						val & L1FAEG, 10, 1000);
  95		WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
  96		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
  97	}
  98
  99unlock_exit:
 100	spin_unlock_irqrestore(&pmsr_lock, flags);
 101	return ret;
 102}
 103
 104static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
 105{
 106	return container_of(msi, struct rcar_pcie_host, msi);
 107}
 108
 109static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 110{
 111	unsigned int shift = BITS_PER_BYTE * (where & 3);
 112	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 113
 114	return val >> shift;
 115}
 116
 117#ifdef CONFIG_ARM
 118#define __rcar_pci_rw_reg_workaround(instr)				\
 119		"	.arch armv7-a\n"				\
 120		"1:	" instr " %1, [%2]\n"				\
 121		"2:	isb\n"						\
 122		"3:	.pushsection .text.fixup,\"ax\"\n"		\
 123		"	.align	2\n"					\
 124		"4:	mov	%0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
 125		"	b	3b\n"					\
 126		"	.popsection\n"					\
 127		"	.pushsection __ex_table,\"a\"\n"		\
 128		"	.align	3\n"					\
 129		"	.long	1b, 4b\n"				\
 130		"	.long	2b, 4b\n"				\
 131		"	.popsection\n"
 132#endif
 133
 134static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
 135					 unsigned int reg)
 136{
 137	int error = PCIBIOS_SUCCESSFUL;
 138#ifdef CONFIG_ARM
 139	asm volatile(
 140		__rcar_pci_rw_reg_workaround("str")
 141	: "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
 142#else
 143	rcar_pci_write_reg(pcie, val, reg);
 144#endif
 145	return error;
 146}
 147
 148static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
 149					unsigned int reg)
 150{
 151	int error = PCIBIOS_SUCCESSFUL;
 152#ifdef CONFIG_ARM
 153	asm volatile(
 154		__rcar_pci_rw_reg_workaround("ldr")
 155	: "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
 156
 157	if (error != PCIBIOS_SUCCESSFUL)
 158		PCI_SET_ERROR_RESPONSE(val);
 159#else
 160	*val = rcar_pci_read_reg(pcie, reg);
 161#endif
 162	return error;
 163}
 164
 165/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 166static int rcar_pcie_config_access(struct rcar_pcie_host *host,
 167		unsigned char access_type, struct pci_bus *bus,
 168		unsigned int devfn, int where, u32 *data)
 169{
 170	struct rcar_pcie *pcie = &host->pcie;
 171	unsigned int dev, func, reg, index;
 172	int ret;
 173
 174	/* Wake the bus up in case it is in L1 state. */
 175	ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
 176	if (ret) {
 177		PCI_SET_ERROR_RESPONSE(data);
 178		return PCIBIOS_SET_FAILED;
 179	}
 180
 181	dev = PCI_SLOT(devfn);
 182	func = PCI_FUNC(devfn);
 183	reg = where & ~3;
 184	index = reg / 4;
 185
 186	/*
 187	 * While each channel has its own memory-mapped extended config
 188	 * space, it's generally only accessible when in endpoint mode.
 189	 * When in root complex mode, the controller is unable to target
 190	 * itself with either type 0 or type 1 accesses, and indeed, any
 191	 * controller initiated target transfer to its own config space
 192	 * result in a completer abort.
 193	 *
 194	 * Each channel effectively only supports a single device, but as
 195	 * the same channel <-> device access works for any PCI_SLOT()
 196	 * value, we cheat a bit here and bind the controller's config
 197	 * space to devfn 0 in order to enable self-enumeration. In this
 198	 * case the regular ECAR/ECDR path is sidelined and the mangled
 199	 * config access itself is initiated as an internal bus transaction.
 200	 */
 201	if (pci_is_root_bus(bus)) {
 202		if (dev != 0)
 203			return PCIBIOS_DEVICE_NOT_FOUND;
 204
 205		if (access_type == RCAR_PCI_ACCESS_READ)
 206			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 207		else
 208			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 209
 210		return PCIBIOS_SUCCESSFUL;
 211	}
 212
 213	/* Clear errors */
 214	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 215
 216	/* Set the PIO address */
 217	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 218		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 219
 220	/* Enable the configuration access */
 221	if (pci_is_root_bus(bus->parent))
 222		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 223	else
 224		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 225
 226	/* Check for errors */
 227	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 228		return PCIBIOS_DEVICE_NOT_FOUND;
 229
 230	/* Check for master and target aborts */
 231	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 232		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 233		return PCIBIOS_DEVICE_NOT_FOUND;
 234
 235	if (access_type == RCAR_PCI_ACCESS_READ)
 236		ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
 237	else
 238		ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
 239
 240	/* Disable the configuration access */
 241	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 242
 243	return ret;
 244}
 245
 246static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 247			       int where, int size, u32 *val)
 248{
 249	struct rcar_pcie_host *host = bus->sysdata;
 250	int ret;
 251
 252	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 253				      bus, devfn, where, val);
 254	if (ret != PCIBIOS_SUCCESSFUL)
 255		return ret;
 256
 257	if (size == 1)
 258		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 259	else if (size == 2)
 260		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 261
 262	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 263		bus->number, devfn, where, size, *val);
 264
 265	return ret;
 266}
 267
 268/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 269static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 270				int where, int size, u32 val)
 271{
 272	struct rcar_pcie_host *host = bus->sysdata;
 273	unsigned int shift;
 274	u32 data;
 275	int ret;
 276
 277	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 278				      bus, devfn, where, &data);
 279	if (ret != PCIBIOS_SUCCESSFUL)
 280		return ret;
 281
 282	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 283		bus->number, devfn, where, size, val);
 284
 285	if (size == 1) {
 286		shift = BITS_PER_BYTE * (where & 3);
 287		data &= ~(0xff << shift);
 288		data |= ((val & 0xff) << shift);
 289	} else if (size == 2) {
 290		shift = BITS_PER_BYTE * (where & 2);
 291		data &= ~(0xffff << shift);
 292		data |= ((val & 0xffff) << shift);
 293	} else
 294		data = val;
 295
 296	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 297				      bus, devfn, where, &data);
 298
 299	return ret;
 300}
 301
 302static struct pci_ops rcar_pcie_ops = {
 303	.read	= rcar_pcie_read_conf,
 304	.write	= rcar_pcie_write_conf,
 305};
 306
 307static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 308{
 309	struct device *dev = pcie->dev;
 310	unsigned int timeout = 1000;
 311	u32 macsr;
 312
 313	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 314		return;
 315
 316	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 317		dev_err(dev, "Speed change already in progress\n");
 318		return;
 319	}
 320
 321	macsr = rcar_pci_read_reg(pcie, MACSR);
 322	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 323		goto done;
 324
 325	/* Set target link speed to 5.0 GT/s */
 326	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 327		   PCI_EXP_LNKSTA_CLS_5_0GB);
 328
 329	/* Set speed change reason as intentional factor */
 330	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 331
 332	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 333	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 334		rcar_pci_write_reg(pcie, macsr, MACSR);
 335
 336	/* Start link speed change */
 337	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 338
 339	while (timeout--) {
 340		macsr = rcar_pci_read_reg(pcie, MACSR);
 341		if (macsr & SPCHGFIN) {
 342			/* Clear the interrupt bits */
 343			rcar_pci_write_reg(pcie, macsr, MACSR);
 344
 345			if (macsr & SPCHGFAIL)
 346				dev_err(dev, "Speed change failed\n");
 347
 348			goto done;
 349		}
 350
 351		msleep(1);
 352	}
 353
 354	dev_err(dev, "Speed change timed out\n");
 355
 356done:
 357	dev_info(dev, "Current link speed is %s GT/s\n",
 358		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 359}
 360
 361static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 362{
 363	struct rcar_pcie *pcie = &host->pcie;
 364	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 365	struct resource_entry *win;
 366	LIST_HEAD(res);
 367	int i = 0;
 368
 369	/* Try setting 5 GT/s link speed */
 370	rcar_pcie_force_speedup(pcie);
 371
 372	/* Setup PCI resources */
 373	resource_list_for_each_entry(win, &bridge->windows) {
 374		struct resource *res = win->res;
 375
 376		if (!res->flags)
 377			continue;
 378
 379		switch (resource_type(res)) {
 380		case IORESOURCE_IO:
 381		case IORESOURCE_MEM:
 382			rcar_pcie_set_outbound(pcie, i, win);
 383			i++;
 384			break;
 385		}
 386	}
 387}
 388
 389static int rcar_pcie_enable(struct rcar_pcie_host *host)
 390{
 391	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 392
 393	rcar_pcie_hw_enable(host);
 394
 395	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 396
 397	bridge->sysdata = host;
 398	bridge->ops = &rcar_pcie_ops;
 399
 400	return pci_host_probe(bridge);
 401}
 402
 403static int phy_wait_for_ack(struct rcar_pcie *pcie)
 404{
 405	struct device *dev = pcie->dev;
 406	unsigned int timeout = 100;
 407
 408	while (timeout--) {
 409		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 410			return 0;
 411
 412		udelay(100);
 413	}
 414
 415	dev_err(dev, "Access to PCIe phy timed out\n");
 416
 417	return -ETIMEDOUT;
 418}
 419
 420static void phy_write_reg(struct rcar_pcie *pcie,
 421			  unsigned int rate, u32 addr,
 422			  unsigned int lane, u32 data)
 423{
 424	u32 phyaddr;
 425
 426	phyaddr = WRITE_CMD |
 427		((rate & 1) << RATE_POS) |
 428		((lane & 0xf) << LANE_POS) |
 429		((addr & 0xff) << ADR_POS);
 430
 431	/* Set write data */
 432	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 433	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 434
 435	/* Ignore errors as they will be dealt with if the data link is down */
 436	phy_wait_for_ack(pcie);
 437
 438	/* Clear command */
 439	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 440	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 441
 442	/* Ignore errors as they will be dealt with if the data link is down */
 443	phy_wait_for_ack(pcie);
 444}
 445
 446static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 447{
 448	int err;
 449
 450	/* Begin initialization */
 451	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 452
 453	/* Set mode */
 454	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 455
 456	err = rcar_pcie_wait_for_phyrdy(pcie);
 457	if (err)
 458		return err;
 459
 460	/*
 461	 * Initial header for port config space is type 1, set the device
 462	 * class to match. Hardware takes care of propagating the IDSETR
 463	 * settings, so there is no need to bother with a quirk.
 464	 */
 465	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
 466
 467	/*
 468	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
 469	 * they aren't used, to avoid bridge being detected as broken.
 470	 */
 471	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 472	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 473
 474	/* Initialize default capabilities. */
 475	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 476	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 477		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 478	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 479		PCI_HEADER_TYPE_BRIDGE);
 480
 481	/* Enable data link layer active state reporting */
 482	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 483		PCI_EXP_LNKCAP_DLLLARC);
 484
 485	/* Write out the physical slot number = 0 */
 486	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 487
 488	/* Set the completion timer timeout to the maximum 50ms. */
 489	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 490
 491	/* Terminate list of capabilities (Next Capability Offset=0) */
 492	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 493
 494	/* Enable MSI */
 495	if (IS_ENABLED(CONFIG_PCI_MSI))
 496		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 497
 498	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 499
 500	/* Finish initialization - establish a PCI Express link */
 501	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 502
 503	/* This will timeout if we don't have a link. */
 504	err = rcar_pcie_wait_for_dl(pcie);
 505	if (err)
 506		return err;
 507
 508	/* Enable INTx interrupts */
 509	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 510
 511	wmb();
 512
 513	return 0;
 514}
 515
 516static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 517{
 518	struct rcar_pcie *pcie = &host->pcie;
 519
 520	/* Initialize the phy */
 521	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 522	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 523	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 524	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 525	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 526	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 527	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 528	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 529	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 530	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 531	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 532	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 533
 534	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 535	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 536	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 537
 538	return 0;
 539}
 540
 541static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 542{
 543	struct rcar_pcie *pcie = &host->pcie;
 544
 545	/*
 546	 * These settings come from the R-Car Series, 2nd Generation User's
 547	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 548	 */
 549	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 550	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 551	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 552	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 553
 554	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 555	/* The following value is for DC connection, no termination resistor */
 556	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 557	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 558	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 559
 560	return 0;
 561}
 562
 563static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 564{
 565	int err;
 566
 567	err = phy_init(host->phy);
 568	if (err)
 569		return err;
 570
 571	err = phy_power_on(host->phy);
 572	if (err)
 573		phy_exit(host->phy);
 574
 575	return err;
 576}
 577
 578static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 579{
 580	struct rcar_pcie_host *host = data;
 581	struct rcar_pcie *pcie = &host->pcie;
 582	struct rcar_msi *msi = &host->msi;
 583	struct device *dev = pcie->dev;
 584	unsigned long reg;
 585
 586	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 587
 588	/* MSI & INTx share an interrupt - we only handle MSI here */
 589	if (!reg)
 590		return IRQ_NONE;
 591
 592	while (reg) {
 593		unsigned int index = find_first_bit(&reg, 32);
 594		int ret;
 595
 596		ret = generic_handle_domain_irq(msi->domain->parent, index);
 597		if (ret) {
 598			/* Unknown MSI, just clear it */
 599			dev_dbg(dev, "unexpected MSI\n");
 600			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 601		}
 602
 603		/* see if there's any more pending in this vector */
 604		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 605	}
 606
 607	return IRQ_HANDLED;
 608}
 609
 610static void rcar_msi_top_irq_ack(struct irq_data *d)
 611{
 612	irq_chip_ack_parent(d);
 613}
 614
 615static void rcar_msi_top_irq_mask(struct irq_data *d)
 616{
 617	pci_msi_mask_irq(d);
 618	irq_chip_mask_parent(d);
 619}
 620
 621static void rcar_msi_top_irq_unmask(struct irq_data *d)
 622{
 623	pci_msi_unmask_irq(d);
 624	irq_chip_unmask_parent(d);
 625}
 626
 627static struct irq_chip rcar_msi_top_chip = {
 628	.name		= "PCIe MSI",
 629	.irq_ack	= rcar_msi_top_irq_ack,
 630	.irq_mask	= rcar_msi_top_irq_mask,
 631	.irq_unmask	= rcar_msi_top_irq_unmask,
 632};
 633
 634static void rcar_msi_irq_ack(struct irq_data *d)
 635{
 636	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 637	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 638
 639	/* clear the interrupt */
 640	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 641}
 642
 643static void rcar_msi_irq_mask(struct irq_data *d)
 644{
 645	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 646	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 647	unsigned long flags;
 648	u32 value;
 649
 650	spin_lock_irqsave(&msi->mask_lock, flags);
 651	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 652	value &= ~BIT(d->hwirq);
 653	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 654	spin_unlock_irqrestore(&msi->mask_lock, flags);
 655}
 656
 657static void rcar_msi_irq_unmask(struct irq_data *d)
 658{
 659	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 660	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 661	unsigned long flags;
 662	u32 value;
 663
 664	spin_lock_irqsave(&msi->mask_lock, flags);
 665	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 666	value |= BIT(d->hwirq);
 667	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 668	spin_unlock_irqrestore(&msi->mask_lock, flags);
 669}
 670
 671static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
 672{
 673	return -EINVAL;
 674}
 675
 676static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 677{
 678	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 679	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 680
 681	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 682	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 683	msg->data = data->hwirq;
 684}
 685
 686static struct irq_chip rcar_msi_bottom_chip = {
 687	.name			= "Rcar MSI",
 688	.irq_ack		= rcar_msi_irq_ack,
 689	.irq_mask		= rcar_msi_irq_mask,
 690	.irq_unmask		= rcar_msi_irq_unmask,
 691	.irq_set_affinity 	= rcar_msi_set_affinity,
 692	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 693};
 694
 695static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 696				  unsigned int nr_irqs, void *args)
 697{
 698	struct rcar_msi *msi = domain->host_data;
 699	unsigned int i;
 700	int hwirq;
 701
 702	mutex_lock(&msi->map_lock);
 703
 704	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 705
 706	mutex_unlock(&msi->map_lock);
 707
 708	if (hwirq < 0)
 709		return -ENOSPC;
 710
 711	for (i = 0; i < nr_irqs; i++)
 712		irq_domain_set_info(domain, virq + i, hwirq + i,
 713				    &rcar_msi_bottom_chip, domain->host_data,
 714				    handle_edge_irq, NULL, NULL);
 715
 716	return 0;
 717}
 718
 719static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 720				  unsigned int nr_irqs)
 721{
 722	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 723	struct rcar_msi *msi = domain->host_data;
 724
 725	mutex_lock(&msi->map_lock);
 726
 727	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 728
 729	mutex_unlock(&msi->map_lock);
 730}
 731
 732static const struct irq_domain_ops rcar_msi_domain_ops = {
 733	.alloc	= rcar_msi_domain_alloc,
 734	.free	= rcar_msi_domain_free,
 735};
 736
 737static struct msi_domain_info rcar_msi_info = {
 738	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 739		   MSI_FLAG_MULTI_PCI_MSI),
 740	.chip	= &rcar_msi_top_chip,
 741};
 742
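/*
 * Create the IRQ domain hierarchy: a linear domain covering the hardware MSI
 * vectors, with a PCI MSI domain stacked on top of it for use by the PCI core.
 */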
 743static int rcar_allocate_domains(struct rcar_msi *msi)
 744{
 745	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 746	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 747	struct irq_domain *parent;
 748
 749	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 750					  &rcar_msi_domain_ops, msi);
 751	if (!parent) {
 752		dev_err(pcie->dev, "failed to create IRQ domain\n");
 753		return -ENOMEM;
 754	}
 755	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 756
 757	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 758	if (!msi->domain) {
 759		dev_err(pcie->dev, "failed to create MSI domain\n");
 760		irq_domain_remove(parent);
 761		return -ENOMEM;
 762	}
 763
 764	return 0;
 765}
 766
 767static void rcar_free_domains(struct rcar_msi *msi)
 768{
 769	struct irq_domain *parent = msi->domain->parent;
 770
 771	irq_domain_remove(msi->domain);
 772	irq_domain_remove(parent);
 773}
 774
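/*
 * Set up MSI support: create the IRQ domains, request the two interrupt lines
 * shared with non-MSI sources, disable all vectors and program the
 * controller's own base address as the MSI target address.
 */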
 775static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 776{
 777	struct rcar_pcie *pcie = &host->pcie;
 778	struct device *dev = pcie->dev;
 779	struct rcar_msi *msi = &host->msi;
 780	struct resource res;
 781	int err;
 782
 783	mutex_init(&msi->map_lock);
 784	spin_lock_init(&msi->mask_lock);
 785
 786	err = of_address_to_resource(dev->of_node, 0, &res);
 787	if (err)
 788		return err;
 789
 790	err = rcar_allocate_domains(msi);
 791	if (err)
 792		return err;
 793
 794	/* Two IRQs are reserved for MSI, but they are also used for non-MSI IRQs */
 795	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 796			       IRQF_SHARED | IRQF_NO_THREAD,
 797			       rcar_msi_bottom_chip.name, host);
 798	if (err < 0) {
 799		dev_err(dev, "failed to request IRQ: %d\n", err);
 800		goto err;
 801	}
 802
 803	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 804			       IRQF_SHARED | IRQF_NO_THREAD,
 805			       rcar_msi_bottom_chip.name, host);
 806	if (err < 0) {
 807		dev_err(dev, "failed to request IRQ: %d\n", err);
 808		goto err;
 809	}
 810
 811	/* Disable all MSIs */
 812	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 813
 814	/*
 815	 * Set up the MSI data target using the RC base address, which
 816	 * is guaranteed to be in the low 32-bit range on any R-Car HW.
 817	 */
 818	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 819	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 820
 821	return 0;
 822
 823err:
 824	rcar_free_domains(msi);
 825	return err;
 826}
 827
 828static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 829{
 830	struct rcar_pcie *pcie = &host->pcie;
 831
 832	/* Disable all MSI interrupts */
 833	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 834
 835	/* Disable address decoding of the MSI interrupt, MSIFE */
 836	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 837
 838	rcar_free_domains(&host->msi);
 839}
 840
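/*
 * Obtain the resources needed by the controller: the optional PHY, the
 * register window, the external bus clock and the two interrupt lines.
 */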
 841static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 842{
 843	struct rcar_pcie *pcie = &host->pcie;
 844	struct device *dev = pcie->dev;
 845	struct resource res;
 846	int err, i;
 847
 848	host->phy = devm_phy_optional_get(dev, "pcie");
 849	if (IS_ERR(host->phy))
 850		return PTR_ERR(host->phy);
 851
 852	err = of_address_to_resource(dev->of_node, 0, &res);
 853	if (err)
 854		return err;
 855
 856	pcie->base = devm_ioremap_resource(dev, &res);
 857	if (IS_ERR(pcie->base))
 858		return PTR_ERR(pcie->base);
 859
 860	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 861	if (IS_ERR(host->bus_clk)) {
 862		dev_err(dev, "cannot get pcie bus clock\n");
 863		return PTR_ERR(host->bus_clk);
 864	}
 865
 866	i = irq_of_parse_and_map(dev->of_node, 0);
 867	if (!i) {
 868		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 869		err = -ENOENT;
 870		goto err_irq1;
 871	}
 872	host->msi.irq1 = i;
 873
 874	i = irq_of_parse_and_map(dev->of_node, 1);
 875	if (!i) {
 876		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 877		err = -ENOENT;
 878		goto err_irq2;
 879	}
 880	host->msi.irq2 = i;
 881
 882#ifdef CONFIG_ARM
 883	/* Cache static copies for the L1 link-state fixup hook on aarch32 */
 884	pcie_base = pcie->base;
 885	pcie_dev = pcie->dev;
 886#endif
 887
 888	return 0;
 889
 890err_irq2:
 891	irq_dispose_mapping(host->msi.irq1);
 892err_irq1:
 893	return err;
 894}
 895
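/*
 * Program inbound (PCI -> CPU) windows for one dma-ranges entry. The range
 * may need to be split into several windows because each window size is
 * limited by the alignment of its CPU address and by the 4 GiB hardware
 * maximum; every window consumes two consecutive table entries.
 */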
 896static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 897				    struct resource_entry *entry,
 898				    int *index)
 899{
 900	u64 restype = entry->res->flags;
 901	u64 cpu_addr = entry->res->start;
 902	u64 cpu_end = entry->res->end;
 903	u64 pci_addr = entry->res->start - entry->offset;
 904	u32 flags = LAM_64BIT | LAR_ENABLE;
 905	u64 mask;
 906	u64 size = resource_size(entry->res);
 907	int idx = *index;
 908
 909	if (restype & IORESOURCE_PREFETCH)
 910		flags |= LAM_PREFETCH;
 911
 912	while (cpu_addr < cpu_end) {
 913		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 914			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 915			return -EINVAL;
 916		}
 917		/*
 918		 * If the size of the range is larger than the alignment of
 919		 * the start address, we have to use multiple entries to
 920		 * perform the mapping.
 921		 */
 922		if (cpu_addr > 0) {
 923			unsigned long nr_zeros = __ffs64(cpu_addr);
 924			u64 alignment = 1ULL << nr_zeros;
 925
 926			size = min(size, alignment);
 927		}
 928		/* Hardware supports a 4 GiB inbound region at most */
 929		size = min(size, 1ULL << 32);
 930
 931		mask = roundup_pow_of_two(size) - 1;
 932		mask &= ~0xf;
 933
 934		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 935				      lower_32_bits(mask) | flags, idx, true);
 936
 937		pci_addr += size;
 938		cpu_addr += size;
 939		idx += 2;
 940	}
 941	*index = idx;
 942
 943	return 0;
 944}
 945
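/* Walk the bridge dma-ranges and set up an inbound window for each entry. */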
 946static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 947{
 948	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 949	struct resource_entry *entry;
 950	int index = 0, err = 0;
 951
 952	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 953		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 954		if (err)
 955			break;
 956	}
 957
 958	return err;
 959}
 960
 961static const struct of_device_id rcar_pcie_of_match[] = {
 962	{ .compatible = "renesas,pcie-r8a7779",
 963	  .data = rcar_pcie_phy_init_h1 },
 964	{ .compatible = "renesas,pcie-r8a7790",
 965	  .data = rcar_pcie_phy_init_gen2 },
 966	{ .compatible = "renesas,pcie-r8a7791",
 967	  .data = rcar_pcie_phy_init_gen2 },
 968	{ .compatible = "renesas,pcie-rcar-gen2",
 969	  .data = rcar_pcie_phy_init_gen2 },
 970	{ .compatible = "renesas,pcie-r8a7795",
 971	  .data = rcar_pcie_phy_init_gen3 },
 972	{ .compatible = "renesas,pcie-rcar-gen3",
 973	  .data = rcar_pcie_phy_init_gen3 },
 974	{},
 975};
 976
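/*
 * Probe sequence: allocate the host bridge, enable runtime PM, map the
 * controller resources, enable the bus clock, program the inbound windows,
 * initialise the PHY, bring up the link (a link-down is treated as "no card
 * present"), optionally enable MSI and finally register the bridge.
 */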
 977static int rcar_pcie_probe(struct platform_device *pdev)
 978{
 979	struct device *dev = &pdev->dev;
 980	struct rcar_pcie_host *host;
 981	struct rcar_pcie *pcie;
 982	u32 data;
 983	int err;
 984	struct pci_host_bridge *bridge;
 985
 986	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 987	if (!bridge)
 988		return -ENOMEM;
 989
 990	host = pci_host_bridge_priv(bridge);
 991	pcie = &host->pcie;
 992	pcie->dev = dev;
 993	platform_set_drvdata(pdev, host);
 994
 995	pm_runtime_enable(pcie->dev);
 996	err = pm_runtime_get_sync(pcie->dev);
 997	if (err < 0) {
 998		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 999		goto err_pm_put;
1000	}
1001
1002	err = rcar_pcie_get_resources(host);
1003	if (err < 0) {
1004		dev_err(dev, "failed to request resources: %d\n", err);
1005		goto err_pm_put;
1006	}
1007
1008	err = clk_prepare_enable(host->bus_clk);
1009	if (err) {
1010		dev_err(dev, "failed to enable bus clock: %d\n", err);
1011		goto err_unmap_msi_irqs;
1012	}
1013
1014	err = rcar_pcie_parse_map_dma_ranges(host);
1015	if (err)
1016		goto err_clk_disable;
1017
1018	host->phy_init_fn = of_device_get_match_data(dev);
1019	err = host->phy_init_fn(host);
1020	if (err) {
1021		dev_err(dev, "failed to init PCIe PHY\n");
1022		goto err_clk_disable;
1023	}
1024
1025	/* Failure to get a link might just be that no cards are inserted */
1026	if (rcar_pcie_hw_init(pcie)) {
1027		dev_info(dev, "PCIe link down\n");
1028		err = -ENODEV;
1029		goto err_phy_shutdown;
1030	}
1031
1032	data = rcar_pci_read_reg(pcie, MACSR);
1033	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1034
1035	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1036		err = rcar_pcie_enable_msi(host);
1037		if (err < 0) {
1038			dev_err(dev,
1039				"failed to enable MSI support: %d\n",
1040				err);
1041			goto err_phy_shutdown;
1042		}
1043	}
1044
1045	err = rcar_pcie_enable(host);
1046	if (err)
1047		goto err_msi_teardown;
1048
1049	return 0;
1050
1051err_msi_teardown:
1052	if (IS_ENABLED(CONFIG_PCI_MSI))
1053		rcar_pcie_teardown_msi(host);
1054
1055err_phy_shutdown:
1056	if (host->phy) {
1057		phy_power_off(host->phy);
1058		phy_exit(host->phy);
1059	}
1060
1061err_clk_disable:
1062	clk_disable_unprepare(host->bus_clk);
1063
1064err_unmap_msi_irqs:
1065	irq_dispose_mapping(host->msi.irq2);
1066	irq_dispose_mapping(host->msi.irq1);
1067
1068err_pm_put:
1069	pm_runtime_put(dev);
1070	pm_runtime_disable(dev);
1071
1072	return err;
1073}
1074
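/*
 * System resume: re-program the inbound windows, re-initialise the PHY and
 * the link, restore the MSI target address and the vectors recorded in the
 * allocation bitmap, then re-enable the controller windows.
 */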
1075static int rcar_pcie_resume(struct device *dev)
1076{
1077	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1078	struct rcar_pcie *pcie = &host->pcie;
1079	unsigned int data;
1080	int err;
1081
1082	err = rcar_pcie_parse_map_dma_ranges(host);
1083	if (err)
1084		return 0;
1085
1086	/* Failure to get a link might just be that no cards are inserted */
1087	err = host->phy_init_fn(host);
1088	if (err) {
1089		dev_info(dev, "PCIe link down\n");
1090		return 0;
1091	}
1092
1093	data = rcar_pci_read_reg(pcie, MACSR);
1094	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1095
1096	/* Enable MSI */
1097	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1098		struct resource res;
1099		u32 val;
1100
1101		of_address_to_resource(dev->of_node, 0, &res);
1102		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
1103		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
1104
1105		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
1106		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
1107	}
1108
1109	rcar_pcie_hw_enable(host);
1110
1111	return 0;
1112}
1113
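/*
 * If the controller retained its state across suspend (PMSR is non-zero and
 * the data link is still up), nothing needs to be done; otherwise restart
 * link training and wait for the data link to come back up.
 */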
1114static int rcar_pcie_resume_noirq(struct device *dev)
1115{
1116	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1117	struct rcar_pcie *pcie = &host->pcie;
1118
1119	if (rcar_pci_read_reg(pcie, PMSR) &&
1120	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1121		return 0;
1122
1123	/* Re-establish the PCIe link */
1124	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1125	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1126	return rcar_pcie_wait_for_dl(pcie);
1127}
1128
1129static const struct dev_pm_ops rcar_pcie_pm_ops = {
1130	SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1131	.resume_noirq = rcar_pcie_resume_noirq,
1132};
1133
1134static struct platform_driver rcar_pcie_driver = {
1135	.driver = {
1136		.name = "rcar-pcie",
1137		.of_match_table = rcar_pcie_of_match,
1138		.pm = &rcar_pcie_pm_ops,
1139		.suppress_bind_attrs = true,
1140	},
1141	.probe = rcar_pcie_probe,
1142};
1143
1144#ifdef CONFIG_ARM
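/*
 * On 32-bit R-Car SoCs a faulted PCI config access is reported as an external
 * abort; resolve it through the exception fixup table instead of letting it
 * bring down the kernel.
 */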
1145static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
1146		unsigned int fsr, struct pt_regs *regs)
1147{
1148	return !fixup_exception(regs);
1149}
1150
1151static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
1152	{ .compatible = "renesas,pcie-r8a7779" },
1153	{ .compatible = "renesas,pcie-r8a7790" },
1154	{ .compatible = "renesas,pcie-r8a7791" },
1155	{ .compatible = "renesas,pcie-rcar-gen2" },
1156	{},
1157};
1158
1159static int __init rcar_pcie_init(void)
1160{
1161	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
1162#ifdef CONFIG_ARM_LPAE
1163		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1164				"asynchronous external abort");
1165#else
1166		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1167				"imprecise external abort");
1168#endif
1169	}
1170
1171	return platform_driver_register(&rcar_pcie_driver);
1172}
1173device_initcall(rcar_pcie_init);
1174#else
1175builtin_platform_driver(rcar_pcie_driver);
1176#endif