v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/clk-provider.h>
  17#include <linux/delay.h>
  18#include <linux/interrupt.h>
  19#include <linux/irq.h>
  20#include <linux/irqdomain.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/iopoll.h>
  24#include <linux/msi.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_platform.h>
  28#include <linux/pci.h>
  29#include <linux/phy/phy.h>
  30#include <linux/platform_device.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/regulator/consumer.h>
  33
  34#include "pcie-rcar.h"
  35
  36struct rcar_msi {
  37	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  38	struct irq_domain *domain;
  39	struct mutex map_lock;
  40	spinlock_t mask_lock;
  41	int irq1;
  42	int irq2;
  43};
  44
  45/* Structure representing the PCIe interface */
  46struct rcar_pcie_host {
  47	struct rcar_pcie	pcie;
  48	struct phy		*phy;
  49	struct clk		*bus_clk;
  50	struct			rcar_msi msi;
  51	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  52};
  53
  54static DEFINE_SPINLOCK(pmsr_lock);
  55
  56static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
  57{
  58	unsigned long flags;
  59	u32 pmsr, val;
  60	int ret = 0;
  61
  62	spin_lock_irqsave(&pmsr_lock, flags);
  63
  64	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
  65		ret = -EINVAL;
  66		goto unlock_exit;
  67	}
  68
  69	pmsr = readl(pcie_base + PMSR);
  70
  71	/*
  72	 * Test if the PCIe controller received PM_ENTER_L1 DLLP and
  73	 * the PCIe controller is not in L1 link state. If true, apply
  74	 * fix, which will put the controller into L1 link state, from
  75	 * which it can return to L0s/L0 on its own.
  76	 */
  77	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
  78		writel(L1IATN, pcie_base + PMCTLR);
  79		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
  80						val & L1FAEG, 10, 1000);
  81		if (ret) {
  82			dev_warn_ratelimited(pcie_dev,
  83					     "Timeout waiting for L1 link state, ret=%d\n",
  84					     ret);
  85		}
  86		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
  87	}
  88
  89unlock_exit:
  90	spin_unlock_irqrestore(&pmsr_lock, flags);
  91	return ret;
  92}
  93
  94static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
  95{
  96	return container_of(msi, struct rcar_pcie_host, msi);
  97}
  98
  99static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 100{
 101	unsigned int shift = BITS_PER_BYTE * (where & 3);
 102	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 103
 104	return val >> shift;
 105}
 106
 107#ifdef CONFIG_ARM
 108#define __rcar_pci_rw_reg_workaround(instr)				\
 109		"	.arch armv7-a\n"				\
 110		"1:	" instr " %1, [%2]\n"				\
 111		"2:	isb\n"						\
 112		"3:	.pushsection .text.fixup,\"ax\"\n"		\
 113		"	.align	2\n"					\
 114		"4:	mov	%0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
 115		"	b	3b\n"					\
 116		"	.popsection\n"					\
 117		"	.pushsection __ex_table,\"a\"\n"		\
 118		"	.align	3\n"					\
 119		"	.long	1b, 4b\n"				\
 120		"	.long	2b, 4b\n"				\
 121		"	.popsection\n"
 122#endif
 123
 124static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
 125					 unsigned int reg)
 126{
 127	int error = PCIBIOS_SUCCESSFUL;
 128#ifdef CONFIG_ARM
 129	asm volatile(
 130		__rcar_pci_rw_reg_workaround("str")
 131	: "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
 132#else
 133	rcar_pci_write_reg(pcie, val, reg);
 134#endif
 135	return error;
 136}
 137
 138static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
 139					unsigned int reg)
 140{
 141	int error = PCIBIOS_SUCCESSFUL;
 142#ifdef CONFIG_ARM
 143	asm volatile(
 144		__rcar_pci_rw_reg_workaround("ldr")
 145	: "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
 146
 147	if (error != PCIBIOS_SUCCESSFUL)
 148		PCI_SET_ERROR_RESPONSE(val);
 149#else
 150	*val = rcar_pci_read_reg(pcie, reg);
 151#endif
 152	return error;
 153}
 154
 155/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 156static int rcar_pcie_config_access(struct rcar_pcie_host *host,
 157		unsigned char access_type, struct pci_bus *bus,
 158		unsigned int devfn, int where, u32 *data)
 159{
 160	struct rcar_pcie *pcie = &host->pcie;
 161	unsigned int dev, func, reg, index;
 162	int ret;
 163
 164	/* Wake the bus up in case it is in L1 state. */
 165	ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
 166	if (ret) {
 167		PCI_SET_ERROR_RESPONSE(data);
 168		return PCIBIOS_SET_FAILED;
 169	}
 170
 171	dev = PCI_SLOT(devfn);
 172	func = PCI_FUNC(devfn);
 173	reg = where & ~3;
 174	index = reg / 4;
 175
 176	/*
 177	 * While each channel has its own memory-mapped extended config
 178	 * space, it's generally only accessible when in endpoint mode.
 179	 * When in root complex mode, the controller is unable to target
 180	 * itself with either type 0 or type 1 accesses, and indeed, any
 181	 * controller initiated target transfer to its own config space
 182	 * result in a completer abort.
 183	 *
 184	 * Each channel effectively only supports a single device, but as
 185	 * the same channel <-> device access works for any PCI_SLOT()
 186	 * value, we cheat a bit here and bind the controller's config
 187	 * space to devfn 0 in order to enable self-enumeration. In this
 188	 * case the regular ECAR/ECDR path is sidelined and the mangled
 189	 * config access itself is initiated as an internal bus transaction.
 190	 */
 191	if (pci_is_root_bus(bus)) {
 192		if (dev != 0)
 193			return PCIBIOS_DEVICE_NOT_FOUND;
 194
 195		if (access_type == RCAR_PCI_ACCESS_READ)
 196			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 197		else
 198			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 199
 200		return PCIBIOS_SUCCESSFUL;
 201	}
 202
 203	/* Clear errors */
 204	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 205
 206	/* Set the PIO address */
 207	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 208		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 209
 210	/* Enable the configuration access */
 211	if (pci_is_root_bus(bus->parent))
 212		rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR);
 213	else
 214		rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR);
 215
 216	/* Check for errors */
 217	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 218		return PCIBIOS_DEVICE_NOT_FOUND;
 219
 220	/* Check for master and target aborts */
 221	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 222		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 223		return PCIBIOS_DEVICE_NOT_FOUND;
 224
 225	if (access_type == RCAR_PCI_ACCESS_READ)
 226		ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
 227	else
 228		ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
 229
 230	/* Disable the configuration access */
 231	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 232
 233	return ret;
 234}
 235
 236static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 237			       int where, int size, u32 *val)
 238{
 239	struct rcar_pcie_host *host = bus->sysdata;
 240	int ret;
 241
 242	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 243				      bus, devfn, where, val);
 244	if (ret != PCIBIOS_SUCCESSFUL)
 245		return ret;
 246
 247	if (size == 1)
 248		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 249	else if (size == 2)
 250		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 251
 252	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 253		bus->number, devfn, where, size, *val);
 254
 255	return ret;
 256}
 257
 258/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 259static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 260				int where, int size, u32 val)
 261{
 262	struct rcar_pcie_host *host = bus->sysdata;
 263	unsigned int shift;
 264	u32 data;
 265	int ret;
 266
 267	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 268				      bus, devfn, where, &data);
 269	if (ret != PCIBIOS_SUCCESSFUL)
 270		return ret;
 271
 272	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 273		bus->number, devfn, where, size, val);
 274
 275	if (size == 1) {
 276		shift = BITS_PER_BYTE * (where & 3);
 277		data &= ~(0xff << shift);
 278		data |= ((val & 0xff) << shift);
 279	} else if (size == 2) {
 280		shift = BITS_PER_BYTE * (where & 2);
 281		data &= ~(0xffff << shift);
 282		data |= ((val & 0xffff) << shift);
 283	} else
 284		data = val;
 285
 286	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 287				      bus, devfn, where, &data);
 288
 289	return ret;
 290}
 291
 292static struct pci_ops rcar_pcie_ops = {
 293	.read	= rcar_pcie_read_conf,
 294	.write	= rcar_pcie_write_conf,
 295};
 296
 297static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 298{
 299	struct device *dev = pcie->dev;
 300	unsigned int timeout = 1000;
 301	u32 macsr;
 302
 303	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 304		return;
 305
 306	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 307		dev_err(dev, "Speed change already in progress\n");
 308		return;
 309	}
 310
 311	macsr = rcar_pci_read_reg(pcie, MACSR);
 312	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 313		goto done;
 314
 315	/* Set target link speed to 5.0 GT/s */
 316	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 317		   PCI_EXP_LNKSTA_CLS_5_0GB);
 318
 319	/* Set speed change reason as intentional factor */
 320	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 321
 322	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 323	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 324		rcar_pci_write_reg(pcie, macsr, MACSR);
 325
 326	/* Start link speed change */
 327	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 328
 329	while (timeout--) {
 330		macsr = rcar_pci_read_reg(pcie, MACSR);
 331		if (macsr & SPCHGFIN) {
 332			/* Clear the interrupt bits */
 333			rcar_pci_write_reg(pcie, macsr, MACSR);
 334
 335			if (macsr & SPCHGFAIL)
 336				dev_err(dev, "Speed change failed\n");
 337
 338			goto done;
 339		}
 340
 341		msleep(1);
 342	}
 343
 344	dev_err(dev, "Speed change timed out\n");
 345
 346done:
 347	dev_info(dev, "Current link speed is %s GT/s\n",
 348		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 349}
 350
 351static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 352{
 353	struct rcar_pcie *pcie = &host->pcie;
 354	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 355	struct resource_entry *win;
 356	LIST_HEAD(res);
 357	int i = 0;
 358
 359	/* Try setting 5 GT/s link speed */
 360	rcar_pcie_force_speedup(pcie);
 361
 362	/* Setup PCI resources */
 363	resource_list_for_each_entry(win, &bridge->windows) {
 364		struct resource *res = win->res;
 365
 366		if (!res->flags)
 367			continue;
 368
 369		switch (resource_type(res)) {
 370		case IORESOURCE_IO:
 371		case IORESOURCE_MEM:
 372			rcar_pcie_set_outbound(pcie, i, win);
 373			i++;
 374			break;
 375		}
 376	}
 377}
 378
 379static int rcar_pcie_enable(struct rcar_pcie_host *host)
 380{
 381	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 382
 383	rcar_pcie_hw_enable(host);
 384
 385	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 386
 387	bridge->sysdata = host;
 388	bridge->ops = &rcar_pcie_ops;
 389
 390	return pci_host_probe(bridge);
 391}
 392
 393static int phy_wait_for_ack(struct rcar_pcie *pcie)
 394{
 395	struct device *dev = pcie->dev;
 396	unsigned int timeout = 100;
 397
 398	while (timeout--) {
 399		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 400			return 0;
 401
 402		udelay(100);
 403	}
 404
 405	dev_err(dev, "Access to PCIe phy timed out\n");
 406
 407	return -ETIMEDOUT;
 408}
 409
 410static void phy_write_reg(struct rcar_pcie *pcie,
 411			  unsigned int rate, u32 addr,
 412			  unsigned int lane, u32 data)
 413{
 414	u32 phyaddr;
 415
 416	phyaddr = WRITE_CMD |
 417		((rate & 1) << RATE_POS) |
 418		((lane & 0xf) << LANE_POS) |
 419		((addr & 0xff) << ADR_POS);
 420
 421	/* Set write data */
 422	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 423	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 424
 425	/* Ignore errors as they will be dealt with if the data link is down */
 426	phy_wait_for_ack(pcie);
 427
 428	/* Clear command */
 429	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 430	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 431
 432	/* Ignore errors as they will be dealt with if the data link is down */
 433	phy_wait_for_ack(pcie);
 434}
 435
 436static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 437{
 438	int err;
 439
 440	/* Begin initialization */
 441	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 442
 443	/* Set mode */
 444	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 445
 446	err = rcar_pcie_wait_for_phyrdy(pcie);
 447	if (err)
 448		return err;
 449
 450	/*
 451	 * Initial header for port config space is type 1, set the device
 452	 * class to match. Hardware takes care of propagating the IDSETR
 453	 * settings, so there is no need to bother with a quirk.
 454	 */
 455	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
 456
 457	/*
 458	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
 459	 * they aren't used, to avoid bridge being detected as broken.
 460	 */
 461	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 462	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 463
 464	/* Initialize default capabilities. */
 465	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 466	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 467		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 468	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
 469		PCI_HEADER_TYPE_BRIDGE);
 470
 471	/* Enable data link layer active state reporting */
 472	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 473		PCI_EXP_LNKCAP_DLLLARC);
 474
 475	/* Write out the physical slot number = 0 */
 476	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 477
 478	/* Set the completion timer timeout to the maximum 50ms. */
 479	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 480
 481	/* Terminate list of capabilities (Next Capability Offset=0) */
 482	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 483
 484	/* Enable MSI */
 485	if (IS_ENABLED(CONFIG_PCI_MSI))
 486		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 487
 488	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 489
 490	/* Finish initialization - establish a PCI Express link */
 491	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 492
 493	/* This will timeout if we don't have a link. */
 494	err = rcar_pcie_wait_for_dl(pcie);
 495	if (err)
 496		return err;
 497
 498	/* Enable INTx interrupts */
 499	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 500
 501	wmb();
 502
 503	return 0;
 504}
 505
 506static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 507{
 508	struct rcar_pcie *pcie = &host->pcie;
 509
 510	/* Initialize the phy */
 511	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 512	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 513	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 514	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 515	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 516	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 517	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 518	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 519	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 520	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 521	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 522	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 523
 524	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 525	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 526	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 527
 528	return 0;
 529}
 530
 531static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 532{
 533	struct rcar_pcie *pcie = &host->pcie;
 534
 535	/*
 536	 * These settings come from the R-Car Series, 2nd Generation User's
 537	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 538	 */
 539	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 540	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 541	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 542	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 543
 544	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 545	/* The following value is for DC connection, no termination resistor */
 546	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 547	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 548	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 549
 550	return 0;
 551}
 552
 553static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 554{
 555	int err;
 556
 557	err = phy_init(host->phy);
 558	if (err)
 559		return err;
 560
 561	err = phy_power_on(host->phy);
 562	if (err)
 563		phy_exit(host->phy);
 564
 565	return err;
 566}
 567
 568static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 569{
 570	struct rcar_pcie_host *host = data;
 571	struct rcar_pcie *pcie = &host->pcie;
 572	struct rcar_msi *msi = &host->msi;
 573	struct device *dev = pcie->dev;
 574	unsigned long reg;
 575
 576	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 577
 578	/* MSI & INTx share an interrupt - we only handle MSI here */
 579	if (!reg)
 580		return IRQ_NONE;
 581
 582	while (reg) {
 583		unsigned int index = find_first_bit(&reg, 32);
 584		int ret;
 585
 586		ret = generic_handle_domain_irq(msi->domain->parent, index);
 587		if (ret) {
 588			/* Unknown MSI, just clear it */
 589			dev_dbg(dev, "unexpected MSI\n");
 590			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 591		}
 592
 593		/* see if there's any more pending in this vector */
 594		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 595	}
 596
 597	return IRQ_HANDLED;
 598}
 599
 600static void rcar_msi_top_irq_ack(struct irq_data *d)
 601{
 602	irq_chip_ack_parent(d);
 603}
 604
 605static void rcar_msi_top_irq_mask(struct irq_data *d)
 606{
 607	pci_msi_mask_irq(d);
 608	irq_chip_mask_parent(d);
 609}
 610
 611static void rcar_msi_top_irq_unmask(struct irq_data *d)
 612{
 613	pci_msi_unmask_irq(d);
 614	irq_chip_unmask_parent(d);
 615}
 616
 617static struct irq_chip rcar_msi_top_chip = {
 618	.name		= "PCIe MSI",
 619	.irq_ack	= rcar_msi_top_irq_ack,
 620	.irq_mask	= rcar_msi_top_irq_mask,
 621	.irq_unmask	= rcar_msi_top_irq_unmask,
 622};
 623
 624static void rcar_msi_irq_ack(struct irq_data *d)
 625{
 626	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 627	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 628
 629	/* clear the interrupt */
 630	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 631}
 632
 633static void rcar_msi_irq_mask(struct irq_data *d)
 634{
 635	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 636	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 637	unsigned long flags;
 638	u32 value;
 639
 640	spin_lock_irqsave(&msi->mask_lock, flags);
 641	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 642	value &= ~BIT(d->hwirq);
 643	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 644	spin_unlock_irqrestore(&msi->mask_lock, flags);
 645}
 646
 647static void rcar_msi_irq_unmask(struct irq_data *d)
 648{
 649	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 650	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 651	unsigned long flags;
 652	u32 value;
 653
 654	spin_lock_irqsave(&msi->mask_lock, flags);
 655	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 656	value |= BIT(d->hwirq);
 657	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 658	spin_unlock_irqrestore(&msi->mask_lock, flags);
 659}
 660
 661static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 662{
 663	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 664	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 665
 666	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 667	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 668	msg->data = data->hwirq;
 669}
 670
 671static struct irq_chip rcar_msi_bottom_chip = {
 672	.name			= "R-Car MSI",
 673	.irq_ack		= rcar_msi_irq_ack,
 674	.irq_mask		= rcar_msi_irq_mask,
 675	.irq_unmask		= rcar_msi_irq_unmask,
 676	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 677};
 678
 679static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 680				  unsigned int nr_irqs, void *args)
 681{
 682	struct rcar_msi *msi = domain->host_data;
 683	unsigned int i;
 684	int hwirq;
 685
 686	mutex_lock(&msi->map_lock);
 687
 688	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 689
 690	mutex_unlock(&msi->map_lock);
 691
 692	if (hwirq < 0)
 693		return -ENOSPC;
 694
 695	for (i = 0; i < nr_irqs; i++)
 696		irq_domain_set_info(domain, virq + i, hwirq + i,
 697				    &rcar_msi_bottom_chip, domain->host_data,
 698				    handle_edge_irq, NULL, NULL);
 699
 700	return 0;
 701}
 702
 703static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 704				  unsigned int nr_irqs)
 705{
 706	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 707	struct rcar_msi *msi = domain->host_data;
 708
 709	mutex_lock(&msi->map_lock);
 710
 711	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 712
 713	mutex_unlock(&msi->map_lock);
 714}
 715
 716static const struct irq_domain_ops rcar_msi_domain_ops = {
 717	.alloc	= rcar_msi_domain_alloc,
 718	.free	= rcar_msi_domain_free,
 719};
 720
 721static struct msi_domain_info rcar_msi_info = {
 722	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 723		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
 724	.chip	= &rcar_msi_top_chip,
 725};
 726
 727static int rcar_allocate_domains(struct rcar_msi *msi)
 728{
 729	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 730	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 731	struct irq_domain *parent;
 732
 733	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 734					  &rcar_msi_domain_ops, msi);
 735	if (!parent) {
 736		dev_err(pcie->dev, "failed to create IRQ domain\n");
 737		return -ENOMEM;
 738	}
 739	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 740
 741	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 742	if (!msi->domain) {
 743		dev_err(pcie->dev, "failed to create MSI domain\n");
 744		irq_domain_remove(parent);
 745		return -ENOMEM;
 746	}
 747
 748	return 0;
 749}
 750
 751static void rcar_free_domains(struct rcar_msi *msi)
 752{
 753	struct irq_domain *parent = msi->domain->parent;
 754
 755	irq_domain_remove(msi->domain);
 756	irq_domain_remove(parent);
 757}
 758
 759static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 760{
 761	struct rcar_pcie *pcie = &host->pcie;
 762	struct device *dev = pcie->dev;
 763	struct rcar_msi *msi = &host->msi;
 764	struct resource res;
 765	int err;
 766
 767	mutex_init(&msi->map_lock);
 768	spin_lock_init(&msi->mask_lock);
 769
 770	err = of_address_to_resource(dev->of_node, 0, &res);
 771	if (err)
 772		return err;
 773
 774	err = rcar_allocate_domains(msi);
 775	if (err)
 776		return err;
 777
 778	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 779	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 780			       IRQF_SHARED | IRQF_NO_THREAD,
 781			       rcar_msi_bottom_chip.name, host);
 782	if (err < 0) {
 783		dev_err(dev, "failed to request IRQ: %d\n", err);
 784		goto err;
 785	}
 786
 787	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 788			       IRQF_SHARED | IRQF_NO_THREAD,
 789			       rcar_msi_bottom_chip.name, host);
 790	if (err < 0) {
 791		dev_err(dev, "failed to request IRQ: %d\n", err);
 792		goto err;
 793	}
 794
 795	/* disable all MSIs */
 796	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 797
 798	/*
 799	 * Setup MSI data target using RC base address, which is guaranteed
 800	 * to be in the low 32bit range on any R-Car HW.
 801	 */
 802	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 803	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 804
 805	return 0;
 806
 807err:
 808	rcar_free_domains(msi);
 809	return err;
 810}
 811
 812static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 813{
 814	struct rcar_pcie *pcie = &host->pcie;
 815
 816	/* Disable all MSI interrupts */
 817	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 818
 819	/* Disable address decoding of the MSI interrupt, MSIFE */
 820	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 821
 822	rcar_free_domains(&host->msi);
 823}
 824
 825static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 826{
 827	struct rcar_pcie *pcie = &host->pcie;
 828	struct device *dev = pcie->dev;
 829	struct resource res;
 830	int err, i;
 831
 832	host->phy = devm_phy_optional_get(dev, "pcie");
 833	if (IS_ERR(host->phy))
 834		return PTR_ERR(host->phy);
 835
 836	err = of_address_to_resource(dev->of_node, 0, &res);
 837	if (err)
 838		return err;
 839
 840	pcie->base = devm_ioremap_resource(dev, &res);
 841	if (IS_ERR(pcie->base))
 842		return PTR_ERR(pcie->base);
 843
 844	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 845	if (IS_ERR(host->bus_clk)) {
 846		dev_err(dev, "cannot get pcie bus clock\n");
 847		return PTR_ERR(host->bus_clk);
 848	}
 849
 850	i = irq_of_parse_and_map(dev->of_node, 0);
 851	if (!i) {
 852		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 853		err = -ENOENT;
 854		goto err_irq1;
 855	}
 856	host->msi.irq1 = i;
 857
 858	i = irq_of_parse_and_map(dev->of_node, 1);
 859	if (!i) {
 860		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 861		err = -ENOENT;
 862		goto err_irq2;
 863	}
 864	host->msi.irq2 = i;
 865
 866	return 0;
 867
 868err_irq2:
 869	irq_dispose_mapping(host->msi.irq1);
 870err_irq1:
 871	return err;
 872}
 873
 874static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 875				    struct resource_entry *entry,
 876				    int *index)
 877{
 878	u64 restype = entry->res->flags;
 879	u64 cpu_addr = entry->res->start;
 880	u64 cpu_end = entry->res->end;
 881	u64 pci_addr = entry->res->start - entry->offset;
 882	u32 flags = LAM_64BIT | LAR_ENABLE;
 883	u64 mask;
 884	u64 size = resource_size(entry->res);
 885	int idx = *index;
 886
 887	if (restype & IORESOURCE_PREFETCH)
 888		flags |= LAM_PREFETCH;
 889
 890	while (cpu_addr < cpu_end) {
 891		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 892			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 893			return -EINVAL;
 894		}
 895		/*
 896		 * If the size of the range is larger than the alignment of
 897		 * the start address, we have to use multiple entries to
 898		 * perform the mapping.
 899		 */
 900		if (cpu_addr > 0) {
 901			unsigned long nr_zeros = __ffs64(cpu_addr);
 902			u64 alignment = 1ULL << nr_zeros;
 903
 904			size = min(size, alignment);
 905		}
 906		/* Hardware supports max 4GiB inbound region */
 907		size = min(size, 1ULL << 32);
 908
 909		mask = roundup_pow_of_two(size) - 1;
 910		mask &= ~0xf;
 911
 912		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 913				      lower_32_bits(mask) | flags, idx, true);
 914
 915		pci_addr += size;
 916		cpu_addr += size;
 917		idx += 2;
 918	}
 919	*index = idx;
 920
 921	return 0;
 922}
 923
 924static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 925{
 926	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 927	struct resource_entry *entry;
 928	int index = 0, err = 0;
 929
 930	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 931		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 932		if (err)
 933			break;
 934	}
 935
 936	return err;
 937}
 938
 939static const struct of_device_id rcar_pcie_of_match[] = {
 940	{ .compatible = "renesas,pcie-r8a7779",
 941	  .data = rcar_pcie_phy_init_h1 },
 942	{ .compatible = "renesas,pcie-r8a7790",
 943	  .data = rcar_pcie_phy_init_gen2 },
 944	{ .compatible = "renesas,pcie-r8a7791",
 945	  .data = rcar_pcie_phy_init_gen2 },
 946	{ .compatible = "renesas,pcie-rcar-gen2",
 947	  .data = rcar_pcie_phy_init_gen2 },
 948	{ .compatible = "renesas,pcie-r8a7795",
 949	  .data = rcar_pcie_phy_init_gen3 },
 950	{ .compatible = "renesas,pcie-rcar-gen3",
 951	  .data = rcar_pcie_phy_init_gen3 },
 952	{},
 953};
 954
 955/* Design note 346 from Linear Technology says order is not important. */
 956static const char * const rcar_pcie_supplies[] = {
 957	"vpcie1v5",
 958	"vpcie3v3",
 959	"vpcie12v",
 960};
 961
 962static int rcar_pcie_probe(struct platform_device *pdev)
 963{
 964	struct device *dev = &pdev->dev;
 965	struct pci_host_bridge *bridge;
 966	struct rcar_pcie_host *host;
 967	struct rcar_pcie *pcie;
 968	unsigned int i;
 969	u32 data;
 970	int err;
 971
 972	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 973	if (!bridge)
 974		return -ENOMEM;
 975
 976	host = pci_host_bridge_priv(bridge);
 977	pcie = &host->pcie;
 978	pcie->dev = dev;
 979	platform_set_drvdata(pdev, host);
 980
 981	for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
 982		err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
 983		if (err < 0 && err != -ENODEV)
 984			return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
 985					     rcar_pcie_supplies[i]);
 986	}
 987
 988	pm_runtime_enable(pcie->dev);
 989	err = pm_runtime_get_sync(pcie->dev);
 990	if (err < 0) {
 991		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 992		goto err_pm_put;
 993	}
 994
 995	err = rcar_pcie_get_resources(host);
 996	if (err < 0) {
 997		dev_err(dev, "failed to request resources: %d\n", err);
 998		goto err_pm_put;
 999	}
1000
1001	err = clk_prepare_enable(host->bus_clk);
1002	if (err) {
1003		dev_err(dev, "failed to enable bus clock: %d\n", err);
1004		goto err_unmap_msi_irqs;
1005	}
1006
1007	err = rcar_pcie_parse_map_dma_ranges(host);
1008	if (err)
1009		goto err_clk_disable;
1010
1011	host->phy_init_fn = of_device_get_match_data(dev);
1012	err = host->phy_init_fn(host);
1013	if (err) {
1014		dev_err(dev, "failed to init PCIe PHY\n");
1015		goto err_clk_disable;
1016	}
1017
1018	/* Failure to get a link might just be that no cards are inserted */
1019	if (rcar_pcie_hw_init(pcie)) {
1020		dev_info(dev, "PCIe link down\n");
1021		err = -ENODEV;
1022		goto err_phy_shutdown;
1023	}
1024
1025	data = rcar_pci_read_reg(pcie, MACSR);
1026	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1027
1028	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1029		err = rcar_pcie_enable_msi(host);
1030		if (err < 0) {
1031			dev_err(dev,
1032				"failed to enable MSI support: %d\n",
1033				err);
1034			goto err_phy_shutdown;
1035		}
1036	}
1037
1038	err = rcar_pcie_enable(host);
1039	if (err)
1040		goto err_msi_teardown;
1041
1042	return 0;
1043
1044err_msi_teardown:
1045	if (IS_ENABLED(CONFIG_PCI_MSI))
1046		rcar_pcie_teardown_msi(host);
1047
1048err_phy_shutdown:
1049	if (host->phy) {
1050		phy_power_off(host->phy);
1051		phy_exit(host->phy);
1052	}
1053
1054err_clk_disable:
1055	clk_disable_unprepare(host->bus_clk);
1056
1057err_unmap_msi_irqs:
1058	irq_dispose_mapping(host->msi.irq2);
1059	irq_dispose_mapping(host->msi.irq1);
1060
1061err_pm_put:
1062	pm_runtime_put(dev);
1063	pm_runtime_disable(dev);
1064
1065	return err;
1066}
1067
1068static int rcar_pcie_resume(struct device *dev)
1069{
1070	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1071	struct rcar_pcie *pcie = &host->pcie;
1072	unsigned int data;
1073	int err;
1074
1075	err = rcar_pcie_parse_map_dma_ranges(host);
1076	if (err)
1077		return 0;
1078
1079	/* Failure to get a link might just be that no cards are inserted */
1080	err = host->phy_init_fn(host);
1081	if (err) {
1082		dev_info(dev, "PCIe link down\n");
1083		return 0;
1084	}
1085
1086	data = rcar_pci_read_reg(pcie, MACSR);
1087	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1088
1089	/* Enable MSI */
1090	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1091		struct resource res;
1092		u32 val;
1093
1094		of_address_to_resource(dev->of_node, 0, &res);
1095		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
1096		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
1097
1098		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
1099		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
1100	}
1101
1102	rcar_pcie_hw_enable(host);
1103
1104	return 0;
1105}
1106
1107static int rcar_pcie_resume_noirq(struct device *dev)
1108{
1109	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1110	struct rcar_pcie *pcie = &host->pcie;
1111
1112	if (rcar_pci_read_reg(pcie, PMSR) &&
1113	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1114		return 0;
1115
1116	/* Re-establish the PCIe link */
1117	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1118	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1119	return rcar_pcie_wait_for_dl(pcie);
1120}
1121
1122static const struct dev_pm_ops rcar_pcie_pm_ops = {
1123	SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1124	.resume_noirq = rcar_pcie_resume_noirq,
1125};
1126
1127static struct platform_driver rcar_pcie_driver = {
1128	.driver = {
1129		.name = "rcar-pcie",
1130		.of_match_table = rcar_pcie_of_match,
1131		.pm = &rcar_pcie_pm_ops,
1132		.suppress_bind_attrs = true,
1133	},
1134	.probe = rcar_pcie_probe,
1135};
1136
1137#ifdef CONFIG_ARM
1138static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
1139		unsigned int fsr, struct pt_regs *regs)
1140{
1141	return !fixup_exception(regs);
1142}
1143
1144static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
1145	{ .compatible = "renesas,pcie-r8a7779" },
1146	{ .compatible = "renesas,pcie-r8a7790" },
1147	{ .compatible = "renesas,pcie-r8a7791" },
1148	{ .compatible = "renesas,pcie-rcar-gen2" },
1149	{},
1150};
1151
1152static int __init rcar_pcie_init(void)
1153{
1154	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
1155#ifdef CONFIG_ARM_LPAE
1156		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1157				"asynchronous external abort");
1158#else
1159		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1160				"imprecise external abort");
1161#endif
1162	}
1163
1164	return platform_driver_register(&rcar_pcie_driver);
1165}
1166device_initcall(rcar_pcie_init);
1167#else
1168builtin_platform_driver(rcar_pcie_driver);
1169#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/interrupt.h>
  18#include <linux/irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/kernel.h>
  21#include <linux/init.h>
  22#include <linux/msi.h>
  23#include <linux/of_address.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_pci.h>
  26#include <linux/of_platform.h>
  27#include <linux/pci.h>
  28#include <linux/phy/phy.h>
  29#include <linux/platform_device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/slab.h>
  32
  33#include "pcie-rcar.h"
  34
  35struct rcar_msi {
  36	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  37	struct irq_domain *domain;
  38	struct msi_controller chip;
  39	unsigned long pages;
  40	struct mutex lock;
  41	int irq1;
  42	int irq2;
  43};
  44
  45static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
  46{
  47	return container_of(chip, struct rcar_msi, chip);
  48}
  49
  50/* Structure representing the PCIe interface */
  51struct rcar_pcie_host {
  52	struct rcar_pcie	pcie;
  53	struct device		*dev;
  54	struct phy		*phy;
  55	void __iomem		*base;
  56	struct clk		*bus_clk;
  57	struct			rcar_msi msi;
  58	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  59};
  60
  61static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
  62{
  63	unsigned int shift = BITS_PER_BYTE * (where & 3);
  64	u32 val = rcar_pci_read_reg(pcie, where & ~3);
  65
  66	return val >> shift;
  67}
  68
  69/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
  70static int rcar_pcie_config_access(struct rcar_pcie_host *host,
  71		unsigned char access_type, struct pci_bus *bus,
  72		unsigned int devfn, int where, u32 *data)
  73{
  74	struct rcar_pcie *pcie = &host->pcie;
  75	unsigned int dev, func, reg, index;
  76
  77	dev = PCI_SLOT(devfn);
  78	func = PCI_FUNC(devfn);
  79	reg = where & ~3;
  80	index = reg / 4;
  81
  82	/*
  83	 * While each channel has its own memory-mapped extended config
  84	 * space, it's generally only accessible when in endpoint mode.
  85	 * When in root complex mode, the controller is unable to target
  86	 * itself with either type 0 or type 1 accesses, and indeed, any
  87	 * controller initiated target transfer to its own config space
  88	 * result in a completer abort.
  89	 *
  90	 * Each channel effectively only supports a single device, but as
  91	 * the same channel <-> device access works for any PCI_SLOT()
  92	 * value, we cheat a bit here and bind the controller's config
  93	 * space to devfn 0 in order to enable self-enumeration. In this
  94	 * case the regular ECAR/ECDR path is sidelined and the mangled
  95	 * config access itself is initiated as an internal bus transaction.
  96	 */
  97	if (pci_is_root_bus(bus)) {
  98		if (dev != 0)
  99			return PCIBIOS_DEVICE_NOT_FOUND;
 100
 101		if (access_type == RCAR_PCI_ACCESS_READ)
 102			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 103		else
 104			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 105
 106		return PCIBIOS_SUCCESSFUL;
 107	}
 108
 109	/* Clear errors */
 110	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 111
 112	/* Set the PIO address */
 113	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 114		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 115
 116	/* Enable the configuration access */
 117	if (pci_is_root_bus(bus->parent))
 118		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 119	else
 120		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 121
 122	/* Check for errors */
 123	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 124		return PCIBIOS_DEVICE_NOT_FOUND;
 125
 126	/* Check for master and target aborts */
 127	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 128		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 129		return PCIBIOS_DEVICE_NOT_FOUND;
 130
 131	if (access_type == RCAR_PCI_ACCESS_READ)
 132		*data = rcar_pci_read_reg(pcie, PCIECDR);
 133	else
 134		rcar_pci_write_reg(pcie, *data, PCIECDR);
 135
 136	/* Disable the configuration access */
 137	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 138
 139	return PCIBIOS_SUCCESSFUL;
 140}
 141
 142static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 143			       int where, int size, u32 *val)
 144{
 145	struct rcar_pcie_host *host = bus->sysdata;
 146	int ret;
 147
 148	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 149				      bus, devfn, where, val);
 150	if (ret != PCIBIOS_SUCCESSFUL) {
 151		*val = 0xffffffff;
 152		return ret;
 153	}
 154
 155	if (size == 1)
 156		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 157	else if (size == 2)
 158		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 159
 160	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 161		bus->number, devfn, where, size, *val);
 162
 163	return ret;
 164}
 165
 166/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 167static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 168				int where, int size, u32 val)
 169{
 170	struct rcar_pcie_host *host = bus->sysdata;
 171	unsigned int shift;
 172	u32 data;
 173	int ret;
 174
 175	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 176				      bus, devfn, where, &data);
 177	if (ret != PCIBIOS_SUCCESSFUL)
 178		return ret;
 179
 180	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 181		bus->number, devfn, where, size, val);
 182
 183	if (size == 1) {
 184		shift = BITS_PER_BYTE * (where & 3);
 185		data &= ~(0xff << shift);
 186		data |= ((val & 0xff) << shift);
 187	} else if (size == 2) {
 188		shift = BITS_PER_BYTE * (where & 2);
 189		data &= ~(0xffff << shift);
 190		data |= ((val & 0xffff) << shift);
 191	} else
 192		data = val;
 193
 194	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 195				      bus, devfn, where, &data);
 196
 197	return ret;
 198}
 199
 200static struct pci_ops rcar_pcie_ops = {
 201	.read	= rcar_pcie_read_conf,
 202	.write	= rcar_pcie_write_conf,
 203};
 204
 205static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 206{
 207	struct device *dev = pcie->dev;
 208	unsigned int timeout = 1000;
 209	u32 macsr;
 210
 211	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 212		return;
 213
 214	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 215		dev_err(dev, "Speed change already in progress\n");
 216		return;
 217	}
 218
 219	macsr = rcar_pci_read_reg(pcie, MACSR);
 220	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 221		goto done;
 222
 223	/* Set target link speed to 5.0 GT/s */
 224	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 225		   PCI_EXP_LNKSTA_CLS_5_0GB);
 226
 227	/* Set speed change reason as intentional factor */
 228	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 229
 230	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 231	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 232		rcar_pci_write_reg(pcie, macsr, MACSR);
 233
 234	/* Start link speed change */
 235	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 236
 237	while (timeout--) {
 238		macsr = rcar_pci_read_reg(pcie, MACSR);
 239		if (macsr & SPCHGFIN) {
 240			/* Clear the interrupt bits */
 241			rcar_pci_write_reg(pcie, macsr, MACSR);
 242
 243			if (macsr & SPCHGFAIL)
 244				dev_err(dev, "Speed change failed\n");
 245
 246			goto done;
 247		}
 248
 249		msleep(1);
 250	}
 251
 252	dev_err(dev, "Speed change timed out\n");
 253
 254done:
 255	dev_info(dev, "Current link speed is %s GT/s\n",
 256		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 257}
 258
 259static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 260{
 261	struct rcar_pcie *pcie = &host->pcie;
 262	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 263	struct resource_entry *win;
 264	LIST_HEAD(res);
 265	int i = 0;
 266
 267	/* Try setting 5 GT/s link speed */
 268	rcar_pcie_force_speedup(pcie);
 269
 270	/* Setup PCI resources */
 271	resource_list_for_each_entry(win, &bridge->windows) {
 272		struct resource *res = win->res;
 273
 274		if (!res->flags)
 275			continue;
 276
 277		switch (resource_type(res)) {
 278		case IORESOURCE_IO:
 279		case IORESOURCE_MEM:
 280			rcar_pcie_set_outbound(pcie, i, win);
 281			i++;
 282			break;
 283		}
 284	}
 285}
 286
 287static int rcar_pcie_enable(struct rcar_pcie_host *host)
 288{
 289	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 290
 291	rcar_pcie_hw_enable(host);
 292
 293	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 294
 295	bridge->sysdata = host;
 296	bridge->ops = &rcar_pcie_ops;
 297	if (IS_ENABLED(CONFIG_PCI_MSI))
 298		bridge->msi = &host->msi.chip;
 299
 300	return pci_host_probe(bridge);
 301}
 302
 303static int phy_wait_for_ack(struct rcar_pcie *pcie)
 304{
 305	struct device *dev = pcie->dev;
 306	unsigned int timeout = 100;
 307
 308	while (timeout--) {
 309		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 310			return 0;
 311
 312		udelay(100);
 313	}
 314
 315	dev_err(dev, "Access to PCIe phy timed out\n");
 316
 317	return -ETIMEDOUT;
 318}
 319
 320static void phy_write_reg(struct rcar_pcie *pcie,
 321			  unsigned int rate, u32 addr,
 322			  unsigned int lane, u32 data)
 323{
 324	u32 phyaddr;
 325
 326	phyaddr = WRITE_CMD |
 327		((rate & 1) << RATE_POS) |
 328		((lane & 0xf) << LANE_POS) |
 329		((addr & 0xff) << ADR_POS);
 330
 331	/* Set write data */
 332	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 333	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 334
 335	/* Ignore errors as they will be dealt with if the data link is down */
 336	phy_wait_for_ack(pcie);
 337
 338	/* Clear command */
 339	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 340	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 341
 342	/* Ignore errors as they will be dealt with if the data link is down */
 343	phy_wait_for_ack(pcie);
 344}
 345
 346static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 347{
 348	int err;
 349
 350	/* Begin initialization */
 351	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 352
 353	/* Set mode */
 354	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 355
 356	err = rcar_pcie_wait_for_phyrdy(pcie);
 357	if (err)
 358		return err;
 359
 360	/*
 361	 * Initial header for port config space is type 1, set the device
 362	 * class to match. Hardware takes care of propagating the IDSETR
 363	 * settings, so there is no need to bother with a quirk.
 364	 */
 365	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 366
 367	/*
 368	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
 369	 * they aren't used, to avoid bridge being detected as broken.
 370	 */
 371	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 372	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 373
 374	/* Initialize default capabilities. */
 375	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 376	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 377		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 378	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 379		PCI_HEADER_TYPE_BRIDGE);
 380
 381	/* Enable data link layer active state reporting */
 382	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 383		PCI_EXP_LNKCAP_DLLLARC);
 384
 385	/* Write out the physical slot number = 0 */
 386	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 387
 388	/* Set the completion timer timeout to the maximum 50ms. */
 389	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 390
 391	/* Terminate list of capabilities (Next Capability Offset=0) */
 392	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 393
 394	/* Enable MSI */
 395	if (IS_ENABLED(CONFIG_PCI_MSI))
 396		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 397
 398	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 399
 400	/* Finish initialization - establish a PCI Express link */
 401	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 402
 403	/* This will timeout if we don't have a link. */
 404	err = rcar_pcie_wait_for_dl(pcie);
 405	if (err)
 406		return err;
 407
 408	/* Enable INTx interrupts */
 409	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 410
 411	wmb();
 412
 413	return 0;
 414}
 415
 416static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 417{
 418	struct rcar_pcie *pcie = &host->pcie;
 419
 420	/* Initialize the phy */
 421	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 422	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 423	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 424	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 425	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 426	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 427	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 428	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 429	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 430	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 431	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 432	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 433
 434	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 435	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 436	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 437
 438	return 0;
 439}
 440
 441static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 442{
 443	struct rcar_pcie *pcie = &host->pcie;
 444
 445	/*
 446	 * These settings come from the R-Car Series, 2nd Generation User's
 447	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 448	 */
 449	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 450	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 451	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 452	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 453
 454	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 455	/* The following value is for DC connection, no termination resistor */
 456	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 457	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 458	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 459
 460	return 0;
 461}
 462
 463static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 464{
 465	int err;
 466
 467	err = phy_init(host->phy);
 468	if (err)
 469		return err;
 470
 471	err = phy_power_on(host->phy);
 472	if (err)
 473		phy_exit(host->phy);
 474
 475	return err;
 476}
 477
 478static int rcar_msi_alloc(struct rcar_msi *chip)
 479{
 480	int msi;
 481
 482	mutex_lock(&chip->lock);
 483
 484	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
 485	if (msi < INT_PCI_MSI_NR)
 486		set_bit(msi, chip->used);
 487	else
 488		msi = -ENOSPC;
 489
 490	mutex_unlock(&chip->lock);
 491
 492	return msi;
 493}
 494
 495static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
 496{
 497	int msi;
 498
 499	mutex_lock(&chip->lock);
 500	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
 501				      order_base_2(no_irqs));
 502	mutex_unlock(&chip->lock);
 503
 504	return msi;
 505}
 506
 507static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
 508{
 509	mutex_lock(&chip->lock);
 510	clear_bit(irq, chip->used);
 511	mutex_unlock(&chip->lock);
 512}
 513
 514static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 515{
 516	struct rcar_pcie_host *host = data;
 517	struct rcar_pcie *pcie = &host->pcie;
 518	struct rcar_msi *msi = &host->msi;
 519	struct device *dev = pcie->dev;
 520	unsigned long reg;
 521
 522	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 523
 524	/* MSI & INTx share an interrupt - we only handle MSI here */
 525	if (!reg)
 526		return IRQ_NONE;
 527
 528	while (reg) {
 529		unsigned int index = find_first_bit(&reg, 32);
 530		unsigned int msi_irq;
 531
 532		/* clear the interrupt */
 533		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
 534
 535		msi_irq = irq_find_mapping(msi->domain, index);
 536		if (msi_irq) {
 537			if (test_bit(index, msi->used))
 538				generic_handle_irq(msi_irq);
 539			else
 540				dev_info(dev, "unhandled MSI\n");
 541		} else {
 542			/* Unknown MSI, just clear it */
 543			dev_dbg(dev, "unexpected MSI\n");
 544		}
 545
 546		/* see if there's any more pending in this vector */
 547		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 548	}
 549
 550	return IRQ_HANDLED;
 551}
 552
 553static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
 554			      struct msi_desc *desc)
 555{
 556	struct rcar_msi *msi = to_rcar_msi(chip);
 557	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
 558						   msi.chip);
 559	struct rcar_pcie *pcie = &host->pcie;
 560	struct msi_msg msg;
 561	unsigned int irq;
 562	int hwirq;
 563
 564	hwirq = rcar_msi_alloc(msi);
 565	if (hwirq < 0)
 566		return hwirq;
 567
 568	irq = irq_find_mapping(msi->domain, hwirq);
 569	if (!irq) {
 570		rcar_msi_free(msi, hwirq);
 571		return -EINVAL;
 572	}
 573
 574	irq_set_msi_desc(irq, desc);
 575
 576	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 577	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 578	msg.data = hwirq;
 579
 580	pci_write_msi_msg(irq, &msg);
 581
 582	return 0;
 583}
 584
 585static int rcar_msi_setup_irqs(struct msi_controller *chip,
 586			       struct pci_dev *pdev, int nvec, int type)
 587{
 588	struct rcar_msi *msi = to_rcar_msi(chip);
 589	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
 590						   msi.chip);
 591	struct rcar_pcie *pcie = &host->pcie;
 592	struct msi_desc *desc;
 593	struct msi_msg msg;
 594	unsigned int irq;
 595	int hwirq;
 596	int i;
 597
 598	/* MSI-X interrupts are not supported */
 599	if (type == PCI_CAP_ID_MSIX)
 600		return -EINVAL;
 601
 602	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
 603	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
 604
 605	hwirq = rcar_msi_alloc_region(msi, nvec);
 606	if (hwirq < 0)
 607		return -ENOSPC;
 608
 609	irq = irq_find_mapping(msi->domain, hwirq);
 610	if (!irq)
 611		return -ENOSPC;
 612
 613	for (i = 0; i < nvec; i++) {
 614		/*
 615		 * irq_create_mapping() called from rcar_pcie_probe() pre-
 616		 * allocates descs,  so there is no need to allocate descs here.
 617		 * We can therefore assume that if irq_find_mapping() above
 618		 * returns non-zero, then the descs are also successfully
 619		 * allocated.
 620		 */
 621		if (irq_set_msi_desc_off(irq, i, desc)) {
 622			/* TODO: clear */
 623			return -EINVAL;
 624		}
 625	}
 626
 627	desc->nvec_used = nvec;
 628	desc->msi_attrib.multiple = order_base_2(nvec);
 629
 630	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 631	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 632	msg.data = hwirq;
 633
 634	pci_write_msi_msg(irq, &msg);
 635
 636	return 0;
 637}
 638
 639static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
 640{
 641	struct rcar_msi *msi = to_rcar_msi(chip);
 642	struct irq_data *d = irq_get_irq_data(irq);
 643
 644	rcar_msi_free(msi, d->hwirq);
 645}
 646
 647static struct irq_chip rcar_msi_irq_chip = {
 648	.name = "R-Car PCIe MSI",
 649	.irq_enable = pci_msi_unmask_irq,
 650	.irq_disable = pci_msi_mask_irq,
 651	.irq_mask = pci_msi_mask_irq,
 652	.irq_unmask = pci_msi_unmask_irq,
 653};
 654
 655static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
 656			irq_hw_number_t hwirq)
 657{
 658	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
 659	irq_set_chip_data(irq, domain->host_data);
 660
 661	return 0;
 662}
 663
 664static const struct irq_domain_ops msi_domain_ops = {
 665	.map = rcar_msi_map,
 666};
 667
 668static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
 669{
 670	struct rcar_msi *msi = &host->msi;
 671	int i, irq;
 672
 673	for (i = 0; i < INT_PCI_MSI_NR; i++) {
 674		irq = irq_find_mapping(msi->domain, i);
 675		if (irq > 0)
 676			irq_dispose_mapping(irq);
 677	}
 678
 679	irq_domain_remove(msi->domain);
 680}
 681
 682static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
 683{
 684	struct rcar_pcie *pcie = &host->pcie;
 685	struct rcar_msi *msi = &host->msi;
 686	unsigned long base;
 687
 688	/* setup MSI data target */
 689	base = virt_to_phys((void *)msi->pages);
 690
 691	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
 692	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
 693
 694	/* enable all MSI interrupts */
 695	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
 696}
 697
 698static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 699{
 700	struct rcar_pcie *pcie = &host->pcie;
 701	struct device *dev = pcie->dev;
 702	struct rcar_msi *msi = &host->msi;
 703	int err, i;
 704
 705	mutex_init(&msi->lock);
 706
 707	msi->chip.dev = dev;
 708	msi->chip.setup_irq = rcar_msi_setup_irq;
 709	msi->chip.setup_irqs = rcar_msi_setup_irqs;
 710	msi->chip.teardown_irq = rcar_msi_teardown_irq;
 711
 712	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 713					    &msi_domain_ops, &msi->chip);
 714	if (!msi->domain) {
 715		dev_err(dev, "failed to create IRQ domain\n");
 716		return -ENOMEM;
 717	}
 718
 719	for (i = 0; i < INT_PCI_MSI_NR; i++)
 720		irq_create_mapping(msi->domain, i);
 721
 722	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 723	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 724			       IRQF_SHARED | IRQF_NO_THREAD,
 725			       rcar_msi_irq_chip.name, host);
 726	if (err < 0) {
 727		dev_err(dev, "failed to request IRQ: %d\n", err);
 728		goto err;
 729	}
 730
 731	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 732			       IRQF_SHARED | IRQF_NO_THREAD,
 733			       rcar_msi_irq_chip.name, host);
 734	if (err < 0) {
 735		dev_err(dev, "failed to request IRQ: %d\n", err);
 736		goto err;
 737	}
 738
 739	/* setup MSI data target */
  740	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	if (!msi->pages) {
		/* MSI capture page could not be allocated */
		err = -ENOMEM;
		goto err;
	}
  741	rcar_pcie_hw_enable_msi(host);
 742
 743	return 0;
 744
 745err:
 746	rcar_pcie_unmap_msi(host);
 747	return err;
 748}
 749
 750static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 751{
 752	struct rcar_pcie *pcie = &host->pcie;
 753	struct rcar_msi *msi = &host->msi;
 754
 755	/* Disable all MSI interrupts */
 756	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 757
 758	/* Disable address decoding of the MSI interrupt, MSIFE */
 759	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 760
 761	free_pages(msi->pages, 0);
 762
 763	rcar_pcie_unmap_msi(host);
 764}
 765
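/*
 * Collect everything the host needs from DT: an optional "pcie" PHY, the
 * first register window, the external "pcie_bus" clock and the first two
 * interrupts, which deliver the MSI (and other controller) events handled
 * by rcar_pcie_msi_irq().
 */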
 766static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 767{
 768	struct rcar_pcie *pcie = &host->pcie;
 769	struct device *dev = pcie->dev;
 770	struct resource res;
 771	int err, i;
 772
 773	host->phy = devm_phy_optional_get(dev, "pcie");
 774	if (IS_ERR(host->phy))
 775		return PTR_ERR(host->phy);
 776
 777	err = of_address_to_resource(dev->of_node, 0, &res);
 778	if (err)
 779		return err;
 780
 781	pcie->base = devm_ioremap_resource(dev, &res);
 782	if (IS_ERR(pcie->base))
 783		return PTR_ERR(pcie->base);
 784
 785	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 786	if (IS_ERR(host->bus_clk)) {
 787		dev_err(dev, "cannot get pcie bus clock\n");
 788		return PTR_ERR(host->bus_clk);
 789	}
 790
 791	i = irq_of_parse_and_map(dev->of_node, 0);
 792	if (!i) {
 793		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 794		err = -ENOENT;
 795		goto err_irq1;
 796	}
 797	host->msi.irq1 = i;
 798
 799	i = irq_of_parse_and_map(dev->of_node, 1);
 800	if (!i) {
 801		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 802		err = -ENOENT;
 803		goto err_irq2;
 804	}
 805	host->msi.irq2 = i;
 806
 807	return 0;
 808
 809err_irq2:
 810	irq_dispose_mapping(host->msi.irq1);
 811err_irq1:
 812	return err;
 813}
 814
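/*
 * Program inbound (PCI -> CPU) windows for one dma-ranges entry.  Every
 * window is programmed with a power-of-two mask and, as enforced by the
 * loop below, never exceeds the alignment of its base address or 4 GiB.
 * For example, a 3 GiB range starting at 0x40000000 is split into three
 * 1 GiB windows at 0x40000000, 0x80000000 and 0xc0000000.
 */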
 815static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 816				    struct resource_entry *entry,
 817				    int *index)
 818{
 819	u64 restype = entry->res->flags;
 820	u64 cpu_addr = entry->res->start;
 821	u64 cpu_end = entry->res->end;
 822	u64 pci_addr = entry->res->start - entry->offset;
 823	u32 flags = LAM_64BIT | LAR_ENABLE;
 824	u64 mask;
 825	u64 size = resource_size(entry->res);
 826	int idx = *index;
 827
 828	if (restype & IORESOURCE_PREFETCH)
 829		flags |= LAM_PREFETCH;
 830
 831	while (cpu_addr < cpu_end) {
 832		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 833			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 834			return -EINVAL;
 835		}
 836		/*
 837		 * If the size of the range is larger than the alignment of
 838		 * the start address, we have to use multiple entries to
 839		 * perform the mapping.
 840		 */
 841		if (cpu_addr > 0) {
 842			unsigned long nr_zeros = __ffs64(cpu_addr);
 843			u64 alignment = 1ULL << nr_zeros;
 844
 845			size = min(size, alignment);
 846		}
 847		/* Hardware supports max 4GiB inbound region */
 848		size = min(size, 1ULL << 32);
 849
 850		mask = roundup_pow_of_two(size) - 1;
 851		mask &= ~0xf;
 852
 853		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 854				      lower_32_bits(mask) | flags, idx, true);
 855
 856		pci_addr += size;
 857		cpu_addr += size;
 858		idx += 2;
 859	}
 860	*index = idx;
 861
 862	return 0;
 863}
 864
 865static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 866{
 867	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 868	struct resource_entry *entry;
 869	int index = 0, err = 0;
 870
 871	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 872		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 873		if (err)
 874			break;
 875	}
 876
 877	return err;
 878}
 879
 880static const struct of_device_id rcar_pcie_of_match[] = {
 881	{ .compatible = "renesas,pcie-r8a7779",
 882	  .data = rcar_pcie_phy_init_h1 },
 883	{ .compatible = "renesas,pcie-r8a7790",
 884	  .data = rcar_pcie_phy_init_gen2 },
 885	{ .compatible = "renesas,pcie-r8a7791",
 886	  .data = rcar_pcie_phy_init_gen2 },
 887	{ .compatible = "renesas,pcie-rcar-gen2",
 888	  .data = rcar_pcie_phy_init_gen2 },
 889	{ .compatible = "renesas,pcie-r8a7795",
 890	  .data = rcar_pcie_phy_init_gen3 },
 891	{ .compatible = "renesas,pcie-rcar-gen3",
 892	  .data = rcar_pcie_phy_init_gen3 },
 893	{},
 894};
 895
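/*
 * Probe order: runtime-PM the controller, map resources, enable the bus
 * clock, program the inbound windows, initialise the SoC-specific PHY,
 * train the link, optionally enable MSI, and finally let rcar_pcie_enable()
 * register the host bridge with the PCI core.  The error path unwinds these
 * steps in reverse.
 */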
 896static int rcar_pcie_probe(struct platform_device *pdev)
 897{
 898	struct device *dev = &pdev->dev;
 899	struct rcar_pcie_host *host;
 900	struct rcar_pcie *pcie;
 901	u32 data;
 902	int err;
 903	struct pci_host_bridge *bridge;
 904
 905	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 906	if (!bridge)
 907		return -ENOMEM;
 908
 909	host = pci_host_bridge_priv(bridge);
 910	pcie = &host->pcie;
 911	pcie->dev = dev;
 912	platform_set_drvdata(pdev, host);
 913
 914	pm_runtime_enable(pcie->dev);
 915	err = pm_runtime_get_sync(pcie->dev);
 916	if (err < 0) {
 917		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 918		goto err_pm_put;
 919	}
 920
 921	err = rcar_pcie_get_resources(host);
 922	if (err < 0) {
 923		dev_err(dev, "failed to request resources: %d\n", err);
 924		goto err_pm_put;
 925	}
 926
 927	err = clk_prepare_enable(host->bus_clk);
 928	if (err) {
 929		dev_err(dev, "failed to enable bus clock: %d\n", err);
 930		goto err_unmap_msi_irqs;
 931	}
 932
 933	err = rcar_pcie_parse_map_dma_ranges(host);
 934	if (err)
 935		goto err_clk_disable;
 936
 937	host->phy_init_fn = of_device_get_match_data(dev);
 938	err = host->phy_init_fn(host);
 939	if (err) {
 940		dev_err(dev, "failed to init PCIe PHY\n");
 941		goto err_clk_disable;
 942	}
 943
 944	/* Failure to get a link might just be that no cards are inserted */
 945	if (rcar_pcie_hw_init(pcie)) {
 946		dev_info(dev, "PCIe link down\n");
 947		err = -ENODEV;
 948		goto err_phy_shutdown;
 949	}
 950
 951	data = rcar_pci_read_reg(pcie, MACSR);
 952	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 953
 954	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 955		err = rcar_pcie_enable_msi(host);
 956		if (err < 0) {
 957			dev_err(dev,
 958				"failed to enable MSI support: %d\n",
 959				err);
 960			goto err_phy_shutdown;
 961		}
 962	}
 963
 964	err = rcar_pcie_enable(host);
 965	if (err)
 966		goto err_msi_teardown;
 967
 968	return 0;
 969
 970err_msi_teardown:
 971	if (IS_ENABLED(CONFIG_PCI_MSI))
 972		rcar_pcie_teardown_msi(host);
 973
 974err_phy_shutdown:
 975	if (host->phy) {
 976		phy_power_off(host->phy);
 977		phy_exit(host->phy);
 978	}
 979
 980err_clk_disable:
 981	clk_disable_unprepare(host->bus_clk);
 982
 983err_unmap_msi_irqs:
 984	irq_dispose_mapping(host->msi.irq2);
 985	irq_dispose_mapping(host->msi.irq1);
 986
 987err_pm_put:
 988	pm_runtime_put(dev);
 989	pm_runtime_disable(dev);
 990
 991	return err;
 992}
 993
 994static int __maybe_unused rcar_pcie_resume(struct device *dev)
 995{
 996	struct rcar_pcie_host *host = dev_get_drvdata(dev);
 997	struct rcar_pcie *pcie = &host->pcie;
 998	unsigned int data;
 999	int err;
1000
1001	err = rcar_pcie_parse_map_dma_ranges(host);
1002	if (err)
1003		return 0;
1004
1005	/* Failure to get a link might just be that no cards are inserted */
1006	err = host->phy_init_fn(host);
1007	if (err) {
1008		dev_info(dev, "PCIe link down\n");
1009		return 0;
1010	}
1011
1012	data = rcar_pci_read_reg(pcie, MACSR);
1013	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1014
1015	/* Enable MSI */
1016	if (IS_ENABLED(CONFIG_PCI_MSI))
1017		rcar_pcie_hw_enable_msi(host);
1018
1019	rcar_pcie_hw_enable(host);
1020
1021	return 0;
1022}
1023
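/*
 * If the controller kept its state across suspend (PMSR is non-zero and the
 * data link is not flagged down), nothing needs to be done here; otherwise
 * reinitialise MACCTLR, restart link training via CFINIT and wait for the
 * data link layer to come back up before PCI devices are accessed again.
 */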
1024static int rcar_pcie_resume_noirq(struct device *dev)
1025{
1026	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1027	struct rcar_pcie *pcie = &host->pcie;
1028
1029	if (rcar_pci_read_reg(pcie, PMSR) &&
1030	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1031		return 0;
1032
1033	/* Re-establish the PCIe link */
1034	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1035	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1036	return rcar_pcie_wait_for_dl(pcie);
1037}
1038
1039static const struct dev_pm_ops rcar_pcie_pm_ops = {
1040	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1041	.resume_noirq = rcar_pcie_resume_noirq,
1042};
1043
1044static struct platform_driver rcar_pcie_driver = {
1045	.driver = {
1046		.name = "rcar-pcie",
1047		.of_match_table = rcar_pcie_of_match,
1048		.pm = &rcar_pcie_pm_ops,
1049		.suppress_bind_attrs = true,
1050	},
1051	.probe = rcar_pcie_probe,
1052};
1053builtin_platform_driver(rcar_pcie_driver);