v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
  16#include <linux/clk-provider.h>
  17#include <linux/delay.h>
  18#include <linux/interrupt.h>
  19#include <linux/irq.h>
  20#include <linux/irqdomain.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/iopoll.h>
  24#include <linux/msi.h>
  25#include <linux/of_address.h>
   26#include <linux/of_irq.h>
   27#include <linux/of_platform.h>
  28#include <linux/pci.h>
  29#include <linux/phy/phy.h>
  30#include <linux/platform_device.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/regulator/consumer.h>
  33
  34#include "pcie-rcar.h"
  35
  36struct rcar_msi {
  37	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  38	struct irq_domain *domain;
  39	struct mutex map_lock;
  40	spinlock_t mask_lock;
  41	int irq1;
  42	int irq2;
  43};
  44
  45/* Structure representing the PCIe interface */
  46struct rcar_pcie_host {
  47	struct rcar_pcie	pcie;
  48	struct phy		*phy;
  49	struct clk		*bus_clk;
  50	struct			rcar_msi msi;
  51	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  52};
  53
  54static DEFINE_SPINLOCK(pmsr_lock);
  55
  56static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
  57{
  58	unsigned long flags;
  59	u32 pmsr, val;
  60	int ret = 0;
  61
  62	spin_lock_irqsave(&pmsr_lock, flags);
  63
  64	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
  65		ret = -EINVAL;
  66		goto unlock_exit;
  67	}
  68
  69	pmsr = readl(pcie_base + PMSR);
  70
  71	/*
  72	 * Test if the PCIe controller received PM_ENTER_L1 DLLP and
   73	 * the PCIe controller is not in L1 link state. If true, apply the
   74	 * fix, which puts the controller into the L1 link state, from
   75	 * which it can return to L0s/L0 on its own.
  76	 */
  77	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
  78		writel(L1IATN, pcie_base + PMCTLR);
  79		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
  80						val & L1FAEG, 10, 1000);
  81		if (ret) {
  82			dev_warn_ratelimited(pcie_dev,
  83					     "Timeout waiting for L1 link state, ret=%d\n",
  84					     ret);
  85		}
  86		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
  87	}
  88
  89unlock_exit:
  90	spin_unlock_irqrestore(&pmsr_lock, flags);
  91	return ret;
  92}
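
/*
 * Reader's note (not part of the upstream file): rcar_pcie_wakeup() is
 * invoked from rcar_pcie_config_access() before every config access,
 * under pmsr_lock.  If the link partner has requested L1 entry
 * (PMEL1RX set) while the controller is still outside L1, writing
 * L1IATN to PMCTLR completes the transition so the following config
 * access does not stall.
 */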
  93
  94static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
  95{
  96	return container_of(msi, struct rcar_pcie_host, msi);
  97}
  98
  99static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 100{
 101	unsigned int shift = BITS_PER_BYTE * (where & 3);
 102	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 103
 104	return val >> shift;
 105}
 106
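/*
 * Worked example (illustrative, not part of the upstream file): for a
 * status check via rcar_read_conf(pcie, RCONF(PCI_STATUS)), and
 * assuming the RCONF() base is dword aligned, the low two offset bits
 * are 0x2, so shift = BITS_PER_BYTE * 2 = 16; the aligned 32-bit
 * register is read and shifted right by 16, leaving the status word in
 * the low bits so callers can test PCI_STATUS_* flags directly on the
 * return value.
 */
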
 107#ifdef CONFIG_ARM
 108#define __rcar_pci_rw_reg_workaround(instr)				\
 109		"	.arch armv7-a\n"				\
 110		"1:	" instr " %1, [%2]\n"				\
 111		"2:	isb\n"						\
 112		"3:	.pushsection .text.fixup,\"ax\"\n"		\
 113		"	.align	2\n"					\
 114		"4:	mov	%0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
 115		"	b	3b\n"					\
 116		"	.popsection\n"					\
 117		"	.pushsection __ex_table,\"a\"\n"		\
 118		"	.align	3\n"					\
 119		"	.long	1b, 4b\n"				\
 120		"	.long	2b, 4b\n"				\
 121		"	.popsection\n"
 122#endif
 123
 124static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
 125					 unsigned int reg)
 126{
 127	int error = PCIBIOS_SUCCESSFUL;
 128#ifdef CONFIG_ARM
 129	asm volatile(
 130		__rcar_pci_rw_reg_workaround("str")
 131	: "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
 132#else
 133	rcar_pci_write_reg(pcie, val, reg);
 134#endif
 135	return error;
 136}
 137
 138static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
 139					unsigned int reg)
 140{
 141	int error = PCIBIOS_SUCCESSFUL;
 142#ifdef CONFIG_ARM
 143	asm volatile(
 144		__rcar_pci_rw_reg_workaround("ldr")
 145	: "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
 146
 147	if (error != PCIBIOS_SUCCESSFUL)
 148		PCI_SET_ERROR_RESPONSE(val);
 149#else
 150	*val = rcar_pci_read_reg(pcie, reg);
 151#endif
 152	return error;
 153}
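
/*
 * Explanatory note (not part of the upstream file): in the CONFIG_ARM
 * path above, label 1 is the ldr/str that may fault and label 2 is an
 * isb that forces a pending imprecise abort to be taken while the PC
 * is still covered by the two __ex_table entries.  fixup_exception()
 * then lands on label 4, which sets the return code to
 * PCIBIOS_SET_FAILED and branches back to label 3, so a failed access
 * is reported to the caller instead of taking the machine down.
 */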
 154
 155/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 156static int rcar_pcie_config_access(struct rcar_pcie_host *host,
 157		unsigned char access_type, struct pci_bus *bus,
 158		unsigned int devfn, int where, u32 *data)
 159{
 160	struct rcar_pcie *pcie = &host->pcie;
 161	unsigned int dev, func, reg, index;
 162	int ret;
 163
 164	/* Wake the bus up in case it is in L1 state. */
 165	ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
 166	if (ret) {
 167		PCI_SET_ERROR_RESPONSE(data);
 168		return PCIBIOS_SET_FAILED;
 169	}
 170
 171	dev = PCI_SLOT(devfn);
 172	func = PCI_FUNC(devfn);
 173	reg = where & ~3;
 174	index = reg / 4;
 175
 176	/*
 177	 * While each channel has its own memory-mapped extended config
 178	 * space, it's generally only accessible when in endpoint mode.
 179	 * When in root complex mode, the controller is unable to target
 180	 * itself with either type 0 or type 1 accesses, and indeed, any
  181	 * controller-initiated target transfer to its own config space
  182	 * results in a completer abort.
 183	 *
 184	 * Each channel effectively only supports a single device, but as
 185	 * the same channel <-> device access works for any PCI_SLOT()
 186	 * value, we cheat a bit here and bind the controller's config
 187	 * space to devfn 0 in order to enable self-enumeration. In this
 188	 * case the regular ECAR/ECDR path is sidelined and the mangled
 189	 * config access itself is initiated as an internal bus transaction.
 190	 */
 191	if (pci_is_root_bus(bus)) {
 192		if (dev != 0)
 193			return PCIBIOS_DEVICE_NOT_FOUND;
 194
 195		if (access_type == RCAR_PCI_ACCESS_READ)
 196			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 197		else
 198			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 199
 200		return PCIBIOS_SUCCESSFUL;
 201	}
 202
 203	/* Clear errors */
 204	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 205
 206	/* Set the PIO address */
 207	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 208		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 209
 210	/* Enable the configuration access */
 211	if (pci_is_root_bus(bus->parent))
 212		rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR);
 213	else
 214		rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR);
 215
 216	/* Check for errors */
 217	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 218		return PCIBIOS_DEVICE_NOT_FOUND;
 219
 220	/* Check for master and target aborts */
 221	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 222		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 223		return PCIBIOS_DEVICE_NOT_FOUND;
 224
 225	if (access_type == RCAR_PCI_ACCESS_READ)
 226		ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
 227	else
 228		ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
 229
 230	/* Disable the configuration access */
 231	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 232
 233	return ret;
 234}
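
/*
 * Illustrative example (hypothetical device, not part of the upstream
 * file): reading dword 0x10 (BAR0) of the device directly behind the
 * root port, i.e. bus 1, devfn 0, programs PCIECAR with
 * PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) | 0x10,
 * triggers a TYPE0 access (bus 1's parent is the root bus) and returns
 * the data latched in PCIECDR.  Devices on buses further downstream
 * take the same path with a TYPE1 access.
 */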
 235
 236static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 237			       int where, int size, u32 *val)
 238{
 239	struct rcar_pcie_host *host = bus->sysdata;
 240	int ret;
 241
 242	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 243				      bus, devfn, where, val);
  244	if (ret != PCIBIOS_SUCCESSFUL)
  245		return ret;
  246
 247	if (size == 1)
 248		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 249	else if (size == 2)
 250		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 251
 252	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 253		bus->number, devfn, where, size, *val);
 254
 255	return ret;
 256}
 257
 258/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 259static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 260				int where, int size, u32 val)
 261{
 262	struct rcar_pcie_host *host = bus->sysdata;
 263	unsigned int shift;
 264	u32 data;
 265	int ret;
 266
 267	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 268				      bus, devfn, where, &data);
 269	if (ret != PCIBIOS_SUCCESSFUL)
 270		return ret;
 271
 272	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 273		bus->number, devfn, where, size, val);
 274
 275	if (size == 1) {
 276		shift = BITS_PER_BYTE * (where & 3);
 277		data &= ~(0xff << shift);
 278		data |= ((val & 0xff) << shift);
 279	} else if (size == 2) {
 280		shift = BITS_PER_BYTE * (where & 2);
 281		data &= ~(0xffff << shift);
 282		data |= ((val & 0xffff) << shift);
 283	} else
 284		data = val;
 285
 286	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 287				      bus, devfn, where, &data);
 288
 289	return ret;
 290}
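
/*
 * Worked example (illustrative, not part of the upstream file): a
 * one-byte write of 0xAB to where = 0x01 first reads the full dword at
 * offset 0x00, then with shift = 8 clears bits 15:8, ORs in 0xAB << 8,
 * and writes the merged dword back, leaving the other three bytes of
 * the register unchanged.
 */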
 291
 292static struct pci_ops rcar_pcie_ops = {
 293	.read	= rcar_pcie_read_conf,
 294	.write	= rcar_pcie_write_conf,
 295};
 296
 297static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 298{
 299	struct device *dev = pcie->dev;
 300	unsigned int timeout = 1000;
 301	u32 macsr;
 302
 303	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 304		return;
 305
 306	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 307		dev_err(dev, "Speed change already in progress\n");
 308		return;
 309	}
 310
 311	macsr = rcar_pci_read_reg(pcie, MACSR);
 312	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 313		goto done;
 314
 315	/* Set target link speed to 5.0 GT/s */
 316	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 317		   PCI_EXP_LNKSTA_CLS_5_0GB);
 318
 319	/* Set speed change reason as intentional factor */
 320	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 321
 322	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 323	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 324		rcar_pci_write_reg(pcie, macsr, MACSR);
 325
 326	/* Start link speed change */
 327	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 328
 329	while (timeout--) {
 330		macsr = rcar_pci_read_reg(pcie, MACSR);
 331		if (macsr & SPCHGFIN) {
 332			/* Clear the interrupt bits */
 333			rcar_pci_write_reg(pcie, macsr, MACSR);
 334
 335			if (macsr & SPCHGFAIL)
 336				dev_err(dev, "Speed change failed\n");
 337
 338			goto done;
 339		}
 340
 341		msleep(1);
 342	}
 343
 344	dev_err(dev, "Speed change timed out\n");
 345
 346done:
 347	dev_info(dev, "Current link speed is %s GT/s\n",
 348		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 349}
 350
 351static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 352{
 353	struct rcar_pcie *pcie = &host->pcie;
 354	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 355	struct resource_entry *win;
 356	LIST_HEAD(res);
 357	int i = 0;
 358
 359	/* Try setting 5 GT/s link speed */
 360	rcar_pcie_force_speedup(pcie);
 361
 362	/* Setup PCI resources */
 363	resource_list_for_each_entry(win, &bridge->windows) {
 364		struct resource *res = win->res;
 365
 366		if (!res->flags)
 367			continue;
 368
 369		switch (resource_type(res)) {
 370		case IORESOURCE_IO:
 371		case IORESOURCE_MEM:
 372			rcar_pcie_set_outbound(pcie, i, win);
 373			i++;
 374			break;
 375		}
 376	}
 377}
 378
 379static int rcar_pcie_enable(struct rcar_pcie_host *host)
 380{
 381	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 382
 383	rcar_pcie_hw_enable(host);
 384
 385	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 386
 387	bridge->sysdata = host;
 388	bridge->ops = &rcar_pcie_ops;
 389
 390	return pci_host_probe(bridge);
 391}
 392
 393static int phy_wait_for_ack(struct rcar_pcie *pcie)
 394{
 395	struct device *dev = pcie->dev;
 396	unsigned int timeout = 100;
 397
 398	while (timeout--) {
 399		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 400			return 0;
 401
 402		udelay(100);
 403	}
 404
 405	dev_err(dev, "Access to PCIe phy timed out\n");
 406
 407	return -ETIMEDOUT;
 408}
 409
 410static void phy_write_reg(struct rcar_pcie *pcie,
 411			  unsigned int rate, u32 addr,
 412			  unsigned int lane, u32 data)
 413{
 414	u32 phyaddr;
 415
 416	phyaddr = WRITE_CMD |
 417		((rate & 1) << RATE_POS) |
 418		((lane & 0xf) << LANE_POS) |
 419		((addr & 0xff) << ADR_POS);
 420
 421	/* Set write data */
 422	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 423	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 424
 425	/* Ignore errors as they will be dealt with if the data link is down */
 426	phy_wait_for_ack(pcie);
 427
 428	/* Clear command */
 429	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 430	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 431
 432	/* Ignore errors as they will be dealt with if the data link is down */
 433	phy_wait_for_ack(pcie);
 434}
 435
 436static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 437{
 438	int err;
 439
 440	/* Begin initialization */
 441	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 442
 443	/* Set mode */
 444	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 445
 446	err = rcar_pcie_wait_for_phyrdy(pcie);
 447	if (err)
 448		return err;
 449
 450	/*
 451	 * Initial header for port config space is type 1, set the device
 452	 * class to match. Hardware takes care of propagating the IDSETR
 453	 * settings, so there is no need to bother with a quirk.
 454	 */
 455	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
 456
 457	/*
  458	 * Set up the Secondary Bus Number & Subordinate Bus Number, even
  459	 * though they aren't used, to avoid the bridge being detected as broken.
 460	 */
 461	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 462	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 463
 464	/* Initialize default capabilities. */
 465	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 466	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 467		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 468	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
 469		PCI_HEADER_TYPE_BRIDGE);
 470
 471	/* Enable data link layer active state reporting */
 472	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 473		PCI_EXP_LNKCAP_DLLLARC);
 474
 475	/* Write out the physical slot number = 0 */
 476	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 477
 478	/* Set the completion timer timeout to the maximum 50ms. */
 479	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 480
 481	/* Terminate list of capabilities (Next Capability Offset=0) */
 482	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 483
 484	/* Enable MSI */
 485	if (IS_ENABLED(CONFIG_PCI_MSI))
 486		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 487
 488	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 489
 490	/* Finish initialization - establish a PCI Express link */
 491	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 492
  493	/* This will time out if we don't have a link. */
 494	err = rcar_pcie_wait_for_dl(pcie);
 495	if (err)
 496		return err;
 497
 498	/* Enable INTx interrupts */
 499	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 500
 501	wmb();
 502
 503	return 0;
 504}
 505
 506static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 507{
 508	struct rcar_pcie *pcie = &host->pcie;
 509
 510	/* Initialize the phy */
 511	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 512	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 513	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 514	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 515	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 516	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 517	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 518	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 519	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 520	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 521	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 522	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 523
 524	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 525	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 526	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 527
 528	return 0;
 529}
 530
 531static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 532{
 533	struct rcar_pcie *pcie = &host->pcie;
 534
 535	/*
 536	 * These settings come from the R-Car Series, 2nd Generation User's
 537	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 538	 */
 539	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 540	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 541	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 542	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 543
 544	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 545	/* The following value is for DC connection, no termination resistor */
 546	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 547	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 548	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 549
 550	return 0;
 551}
 552
 553static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 554{
 555	int err;
 556
 557	err = phy_init(host->phy);
 558	if (err)
 559		return err;
 560
 561	err = phy_power_on(host->phy);
 562	if (err)
 563		phy_exit(host->phy);
 564
 565	return err;
 566}
 567
 568static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 569{
 570	struct rcar_pcie_host *host = data;
 571	struct rcar_pcie *pcie = &host->pcie;
 572	struct rcar_msi *msi = &host->msi;
 573	struct device *dev = pcie->dev;
 574	unsigned long reg;
 575
 576	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 577
 578	/* MSI & INTx share an interrupt - we only handle MSI here */
 579	if (!reg)
 580		return IRQ_NONE;
 581
 582	while (reg) {
 583		unsigned int index = find_first_bit(&reg, 32);
 584		int ret;
 585
 586		ret = generic_handle_domain_irq(msi->domain->parent, index);
  587		if (ret) {
 588			/* Unknown MSI, just clear it */
 589			dev_dbg(dev, "unexpected MSI\n");
 590			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 591		}
 592
 593		/* see if there's any more pending in this vector */
 594		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 595	}
 596
 597	return IRQ_HANDLED;
 598}
 599
 600static void rcar_msi_top_irq_ack(struct irq_data *d)
 601{
 602	irq_chip_ack_parent(d);
 603}
 604
 605static void rcar_msi_top_irq_mask(struct irq_data *d)
 606{
 607	pci_msi_mask_irq(d);
 608	irq_chip_mask_parent(d);
 609}
 610
 611static void rcar_msi_top_irq_unmask(struct irq_data *d)
 612{
 613	pci_msi_unmask_irq(d);
 614	irq_chip_unmask_parent(d);
 615}
 616
 617static struct irq_chip rcar_msi_top_chip = {
 618	.name		= "PCIe MSI",
 619	.irq_ack	= rcar_msi_top_irq_ack,
 620	.irq_mask	= rcar_msi_top_irq_mask,
 621	.irq_unmask	= rcar_msi_top_irq_unmask,
 622};
 623
 624static void rcar_msi_irq_ack(struct irq_data *d)
 625{
 626	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 627	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 628
 629	/* clear the interrupt */
 630	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 631}
 632
 633static void rcar_msi_irq_mask(struct irq_data *d)
 634{
 635	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 636	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 637	unsigned long flags;
 638	u32 value;
 639
 640	spin_lock_irqsave(&msi->mask_lock, flags);
 641	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 642	value &= ~BIT(d->hwirq);
 643	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 644	spin_unlock_irqrestore(&msi->mask_lock, flags);
 645}
 646
 647static void rcar_msi_irq_unmask(struct irq_data *d)
 648{
 649	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 650	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 651	unsigned long flags;
 652	u32 value;
 653
 654	spin_lock_irqsave(&msi->mask_lock, flags);
 655	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 656	value |= BIT(d->hwirq);
 657	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 658	spin_unlock_irqrestore(&msi->mask_lock, flags);
 659}
  660
  661static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 662{
 663	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 664	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 665
 666	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 667	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 668	msg->data = data->hwirq;
 669}
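
/*
 * Explanatory note (not part of the upstream file): the address placed
 * in the MSI message is the controller's own base address as
 * programmed into PCIEMSIAUR/PCIEMSIALR (with the MSIFE enable bit
 * masked off), and msg->data is just the hardware vector number.  An
 * endpoint writing that number to this address sets the matching bit
 * in PCIEMSIFR, which rcar_pcie_msi_irq() then demultiplexes.
 */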
 670
 671static struct irq_chip rcar_msi_bottom_chip = {
 672	.name			= "R-Car MSI",
 673	.irq_ack		= rcar_msi_irq_ack,
 674	.irq_mask		= rcar_msi_irq_mask,
  675	.irq_unmask		= rcar_msi_irq_unmask,
  676	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 677};
 678
 679static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 680				  unsigned int nr_irqs, void *args)
 681{
 682	struct rcar_msi *msi = domain->host_data;
 683	unsigned int i;
 684	int hwirq;
 685
 686	mutex_lock(&msi->map_lock);
 687
 688	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 689
 690	mutex_unlock(&msi->map_lock);
 691
 692	if (hwirq < 0)
 693		return -ENOSPC;
 694
 695	for (i = 0; i < nr_irqs; i++)
 696		irq_domain_set_info(domain, virq + i, hwirq + i,
 697				    &rcar_msi_bottom_chip, domain->host_data,
 698				    handle_edge_irq, NULL, NULL);
 699
 700	return 0;
 701}
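
/*
 * Worked example (illustrative, not part of the upstream file): a
 * request for four MSI vectors arrives here with nr_irqs = 4, so
 * order_base_2(4) = 2 and bitmap_find_free_region() reserves a
 * naturally aligned block of four bits, e.g. hwirqs 8..11; each virq
 * is then wired to rcar_msi_bottom_chip with handle_edge_irq as its
 * flow handler.
 */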
 702
 703static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 704				  unsigned int nr_irqs)
 705{
 706	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 707	struct rcar_msi *msi = domain->host_data;
 708
 709	mutex_lock(&msi->map_lock);
 710
 711	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 712
 713	mutex_unlock(&msi->map_lock);
 714}
 715
 716static const struct irq_domain_ops rcar_msi_domain_ops = {
 717	.alloc	= rcar_msi_domain_alloc,
 718	.free	= rcar_msi_domain_free,
 719};
 720
 721static struct msi_domain_info rcar_msi_info = {
 722	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 723		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
 724	.chip	= &rcar_msi_top_chip,
 725};
 726
 727static int rcar_allocate_domains(struct rcar_msi *msi)
 728{
 729	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 730	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 731	struct irq_domain *parent;
 732
 733	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 734					  &rcar_msi_domain_ops, msi);
 735	if (!parent) {
 736		dev_err(pcie->dev, "failed to create IRQ domain\n");
 737		return -ENOMEM;
 738	}
 739	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 740
 741	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 742	if (!msi->domain) {
 743		dev_err(pcie->dev, "failed to create MSI domain\n");
 744		irq_domain_remove(parent);
 745		return -ENOMEM;
 746	}
 747
 748	return 0;
 749}
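
/*
 * Explanatory note (not part of the upstream file): this creates a
 * two-level hierarchy.  The parent linear domain (DOMAIN_BUS_NEXUS)
 * owns the INT_PCI_MSI_NR hardware vectors and drives the PCIEMSI*
 * registers through rcar_msi_bottom_chip, while the child domain from
 * pci_msi_create_irq_domain() exposes them to the generic PCI MSI
 * layer through rcar_msi_top_chip.
 */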
 750
 751static void rcar_free_domains(struct rcar_msi *msi)
 752{
 753	struct irq_domain *parent = msi->domain->parent;
 754
 755	irq_domain_remove(msi->domain);
 756	irq_domain_remove(parent);
 757}
 758
 759static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 760{
 761	struct rcar_pcie *pcie = &host->pcie;
 762	struct device *dev = pcie->dev;
 763	struct rcar_msi *msi = &host->msi;
 764	struct resource res;
 765	int err;
 766
 767	mutex_init(&msi->map_lock);
 768	spin_lock_init(&msi->mask_lock);
 769
 770	err = of_address_to_resource(dev->of_node, 0, &res);
 771	if (err)
 772		return err;
 773
 774	err = rcar_allocate_domains(msi);
 775	if (err)
 776		return err;
 777
 778	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 779	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 780			       IRQF_SHARED | IRQF_NO_THREAD,
 781			       rcar_msi_bottom_chip.name, host);
 782	if (err < 0) {
 783		dev_err(dev, "failed to request IRQ: %d\n", err);
 784		goto err;
 785	}
 786
 787	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 788			       IRQF_SHARED | IRQF_NO_THREAD,
 789			       rcar_msi_bottom_chip.name, host);
 790	if (err < 0) {
 791		dev_err(dev, "failed to request IRQ: %d\n", err);
 792		goto err;
 793	}
 794
 795	/* disable all MSIs */
 796	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 797
 798	/*
  799	 * Set up the MSI data target using the RC base address, which is
  800	 * guaranteed to be in the low 32-bit range on any R-Car HW.
 801	 */
 802	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 803	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 804
 805	return 0;
 806
 807err:
 808	rcar_free_domains(msi);
 809	return err;
 810}
 811
 812static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 813{
 814	struct rcar_pcie *pcie = &host->pcie;
 815
 816	/* Disable all MSI interrupts */
 817	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 818
 819	/* Disable address decoding of the MSI interrupt, MSIFE */
 820	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 821
 822	rcar_free_domains(&host->msi);
 823}
 824
 825static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 826{
 827	struct rcar_pcie *pcie = &host->pcie;
 828	struct device *dev = pcie->dev;
 829	struct resource res;
 830	int err, i;
 831
 832	host->phy = devm_phy_optional_get(dev, "pcie");
 833	if (IS_ERR(host->phy))
 834		return PTR_ERR(host->phy);
 835
 836	err = of_address_to_resource(dev->of_node, 0, &res);
 837	if (err)
 838		return err;
 839
 840	pcie->base = devm_ioremap_resource(dev, &res);
 841	if (IS_ERR(pcie->base))
 842		return PTR_ERR(pcie->base);
 843
 844	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 845	if (IS_ERR(host->bus_clk)) {
 846		dev_err(dev, "cannot get pcie bus clock\n");
 847		return PTR_ERR(host->bus_clk);
 848	}
 849
 850	i = irq_of_parse_and_map(dev->of_node, 0);
 851	if (!i) {
 852		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 853		err = -ENOENT;
 854		goto err_irq1;
 855	}
 856	host->msi.irq1 = i;
 857
 858	i = irq_of_parse_and_map(dev->of_node, 1);
 859	if (!i) {
 860		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 861		err = -ENOENT;
 862		goto err_irq2;
 863	}
 864	host->msi.irq2 = i;
 865
 866	return 0;
 867
 868err_irq2:
 869	irq_dispose_mapping(host->msi.irq1);
 870err_irq1:
 871	return err;
 872}
 873
 874static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 875				    struct resource_entry *entry,
 876				    int *index)
 877{
 878	u64 restype = entry->res->flags;
 879	u64 cpu_addr = entry->res->start;
 880	u64 cpu_end = entry->res->end;
 881	u64 pci_addr = entry->res->start - entry->offset;
 882	u32 flags = LAM_64BIT | LAR_ENABLE;
 883	u64 mask;
 884	u64 size = resource_size(entry->res);
 885	int idx = *index;
 886
 887	if (restype & IORESOURCE_PREFETCH)
 888		flags |= LAM_PREFETCH;
 889
 890	while (cpu_addr < cpu_end) {
 891		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 892			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 893			return -EINVAL;
 894		}
 895		/*
 896		 * If the size of the range is larger than the alignment of
 897		 * the start address, we have to use multiple entries to
 898		 * perform the mapping.
 899		 */
 900		if (cpu_addr > 0) {
 901			unsigned long nr_zeros = __ffs64(cpu_addr);
 902			u64 alignment = 1ULL << nr_zeros;
 903
 904			size = min(size, alignment);
 905		}
 906		/* Hardware supports max 4GiB inbound region */
 907		size = min(size, 1ULL << 32);
 908
 909		mask = roundup_pow_of_two(size) - 1;
 910		mask &= ~0xf;
 911
 912		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 913				      lower_32_bits(mask) | flags, idx, true);
 914
 915		pci_addr += size;
 916		cpu_addr += size;
 917		idx += 2;
 918	}
 919	*index = idx;
 920
 921	return 0;
 922}
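
/*
 * Worked example (hypothetical dma-range, not part of the upstream
 * file): a 2 GiB range starting at CPU address 0x40000000 cannot be
 * covered in one go because the start is only 1 GiB aligned.  The
 * first pass clamps size to that 0x40000000 alignment and maps 1 GiB;
 * the second pass, now at 0x80000000, maps the remaining 1 GiB.  Each
 * pass consumes one register pair, which is why idx advances by two.
 */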
 923
 924static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 925{
 926	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 927	struct resource_entry *entry;
 928	int index = 0, err = 0;
 929
 930	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 931		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 932		if (err)
 933			break;
 934	}
 935
 936	return err;
 937}
 938
 939static const struct of_device_id rcar_pcie_of_match[] = {
 940	{ .compatible = "renesas,pcie-r8a7779",
 941	  .data = rcar_pcie_phy_init_h1 },
 942	{ .compatible = "renesas,pcie-r8a7790",
 943	  .data = rcar_pcie_phy_init_gen2 },
 944	{ .compatible = "renesas,pcie-r8a7791",
 945	  .data = rcar_pcie_phy_init_gen2 },
 946	{ .compatible = "renesas,pcie-rcar-gen2",
 947	  .data = rcar_pcie_phy_init_gen2 },
 948	{ .compatible = "renesas,pcie-r8a7795",
 949	  .data = rcar_pcie_phy_init_gen3 },
 950	{ .compatible = "renesas,pcie-rcar-gen3",
 951	  .data = rcar_pcie_phy_init_gen3 },
 952	{},
 953};
 954
 955/* Design note 346 from Linear Technology says order is not important. */
 956static const char * const rcar_pcie_supplies[] = {
 957	"vpcie1v5",
 958	"vpcie3v3",
 959	"vpcie12v",
 960};
 961
 962static int rcar_pcie_probe(struct platform_device *pdev)
 963{
 964	struct device *dev = &pdev->dev;
 965	struct pci_host_bridge *bridge;
 966	struct rcar_pcie_host *host;
 967	struct rcar_pcie *pcie;
 968	unsigned int i;
 969	u32 data;
  970	int err;
  971
 972	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 973	if (!bridge)
 974		return -ENOMEM;
 975
 976	host = pci_host_bridge_priv(bridge);
 977	pcie = &host->pcie;
 978	pcie->dev = dev;
 979	platform_set_drvdata(pdev, host);
 980
 981	for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
 982		err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
 983		if (err < 0 && err != -ENODEV)
 984			return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
 985					     rcar_pcie_supplies[i]);
 986	}
 987
 988	pm_runtime_enable(pcie->dev);
 989	err = pm_runtime_get_sync(pcie->dev);
 990	if (err < 0) {
 991		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 992		goto err_pm_put;
 993	}
 994
 995	err = rcar_pcie_get_resources(host);
 996	if (err < 0) {
 997		dev_err(dev, "failed to request resources: %d\n", err);
 998		goto err_pm_put;
 999	}
1000
1001	err = clk_prepare_enable(host->bus_clk);
1002	if (err) {
1003		dev_err(dev, "failed to enable bus clock: %d\n", err);
1004		goto err_unmap_msi_irqs;
1005	}
1006
1007	err = rcar_pcie_parse_map_dma_ranges(host);
1008	if (err)
1009		goto err_clk_disable;
1010
1011	host->phy_init_fn = of_device_get_match_data(dev);
1012	err = host->phy_init_fn(host);
1013	if (err) {
1014		dev_err(dev, "failed to init PCIe PHY\n");
1015		goto err_clk_disable;
1016	}
1017
1018	/* Failure to get a link might just be that no cards are inserted */
1019	if (rcar_pcie_hw_init(pcie)) {
1020		dev_info(dev, "PCIe link down\n");
1021		err = -ENODEV;
1022		goto err_phy_shutdown;
1023	}
1024
1025	data = rcar_pci_read_reg(pcie, MACSR);
1026	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1027
1028	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1029		err = rcar_pcie_enable_msi(host);
1030		if (err < 0) {
1031			dev_err(dev,
1032				"failed to enable MSI support: %d\n",
1033				err);
1034			goto err_phy_shutdown;
1035		}
1036	}
1037
1038	err = rcar_pcie_enable(host);
1039	if (err)
1040		goto err_msi_teardown;
1041
1042	return 0;
1043
1044err_msi_teardown:
1045	if (IS_ENABLED(CONFIG_PCI_MSI))
1046		rcar_pcie_teardown_msi(host);
1047
1048err_phy_shutdown:
1049	if (host->phy) {
1050		phy_power_off(host->phy);
1051		phy_exit(host->phy);
1052	}
1053
1054err_clk_disable:
1055	clk_disable_unprepare(host->bus_clk);
1056
1057err_unmap_msi_irqs:
1058	irq_dispose_mapping(host->msi.irq2);
1059	irq_dispose_mapping(host->msi.irq1);
1060
1061err_pm_put:
1062	pm_runtime_put(dev);
1063	pm_runtime_disable(dev);
1064
1065	return err;
1066}
1067
1068static int rcar_pcie_resume(struct device *dev)
1069{
1070	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1071	struct rcar_pcie *pcie = &host->pcie;
1072	unsigned int data;
1073	int err;
1074
1075	err = rcar_pcie_parse_map_dma_ranges(host);
1076	if (err)
1077		return 0;
1078
1079	/* Failure to get a link might just be that no cards are inserted */
1080	err = host->phy_init_fn(host);
1081	if (err) {
1082		dev_info(dev, "PCIe link down\n");
1083		return 0;
1084	}
1085
1086	data = rcar_pci_read_reg(pcie, MACSR);
1087	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1088
1089	/* Enable MSI */
1090	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1091		struct resource res;
1092		u32 val;
1093
1094		of_address_to_resource(dev->of_node, 0, &res);
1095		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
1096		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
1097
1098		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
1099		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
1100	}
1101
1102	rcar_pcie_hw_enable(host);
1103
1104	return 0;
1105}
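
/*
 * Explanatory note (not part of the upstream file): assuming the
 * controller loses register state across system suspend, the resume
 * path redoes the inbound-range and PHY setup, rewrites the MSI
 * doorbell address into PCIEMSIAUR/PCIEMSIALR, re-enables exactly the
 * vectors recorded in the msi.used bitmap via PCIEMSIIER, and finally
 * reprograms the outbound windows in rcar_pcie_hw_enable().
 */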
1106
1107static int rcar_pcie_resume_noirq(struct device *dev)
1108{
1109	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1110	struct rcar_pcie *pcie = &host->pcie;
1111
1112	if (rcar_pci_read_reg(pcie, PMSR) &&
1113	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1114		return 0;
1115
1116	/* Re-establish the PCIe link */
1117	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1118	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1119	return rcar_pcie_wait_for_dl(pcie);
1120}
1121
1122static const struct dev_pm_ops rcar_pcie_pm_ops = {
1123	SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1124	.resume_noirq = rcar_pcie_resume_noirq,
1125};
1126
1127static struct platform_driver rcar_pcie_driver = {
1128	.driver = {
1129		.name = "rcar-pcie",
1130		.of_match_table = rcar_pcie_of_match,
1131		.pm = &rcar_pcie_pm_ops,
1132		.suppress_bind_attrs = true,
1133	},
1134	.probe = rcar_pcie_probe,
1135};
1136
1137#ifdef CONFIG_ARM
1138static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
1139		unsigned int fsr, struct pt_regs *regs)
1140{
1141	return !fixup_exception(regs);
1142}
1143
1144static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
1145	{ .compatible = "renesas,pcie-r8a7779" },
1146	{ .compatible = "renesas,pcie-r8a7790" },
1147	{ .compatible = "renesas,pcie-r8a7791" },
1148	{ .compatible = "renesas,pcie-rcar-gen2" },
1149	{},
1150};
1151
1152static int __init rcar_pcie_init(void)
1153{
1154	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
1155#ifdef CONFIG_ARM_LPAE
1156		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1157				"asynchronous external abort");
1158#else
1159		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
1160				"imprecise external abort");
1161#endif
1162	}
1163
1164	return platform_driver_register(&rcar_pcie_driver);
1165}
1166device_initcall(rcar_pcie_init);
1167#else
1168builtin_platform_driver(rcar_pcie_driver);
1169#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe driver for Renesas R-Car SoCs
   4 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
   5 *
   6 * Based on:
   7 *  arch/sh/drivers/pci/pcie-sh7786.c
   8 *  arch/sh/drivers/pci/ops-sh7786.c
   9 *  Copyright (C) 2009 - 2011  Paul Mundt
  10 *
  11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
  12 */
  13
  14#include <linux/bitops.h>
  15#include <linux/clk.h>
 
  16#include <linux/delay.h>
  17#include <linux/interrupt.h>
  18#include <linux/irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/kernel.h>
  21#include <linux/init.h>
 
  22#include <linux/msi.h>
  23#include <linux/of_address.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_pci.h>
  26#include <linux/of_platform.h>
  27#include <linux/pci.h>
  28#include <linux/phy/phy.h>
  29#include <linux/platform_device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/slab.h>
  32
  33#include "pcie-rcar.h"
  34
  35struct rcar_msi {
  36	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
  37	struct irq_domain *domain;
  38	struct mutex map_lock;
  39	spinlock_t mask_lock;
  40	int irq1;
  41	int irq2;
  42};
  43
  44/* Structure representing the PCIe interface */
  45struct rcar_pcie_host {
  46	struct rcar_pcie	pcie;
  47	struct phy		*phy;
  48	struct clk		*bus_clk;
  49	struct			rcar_msi msi;
  50	int			(*phy_init_fn)(struct rcar_pcie_host *host);
  51};
  52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  53static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
  54{
  55	return container_of(msi, struct rcar_pcie_host, msi);
  56}
  57
  58static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
  59{
  60	unsigned int shift = BITS_PER_BYTE * (where & 3);
  61	u32 val = rcar_pci_read_reg(pcie, where & ~3);
  62
  63	return val >> shift;
  64}
  65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  66/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
  67static int rcar_pcie_config_access(struct rcar_pcie_host *host,
  68		unsigned char access_type, struct pci_bus *bus,
  69		unsigned int devfn, int where, u32 *data)
  70{
  71	struct rcar_pcie *pcie = &host->pcie;
  72	unsigned int dev, func, reg, index;
 
 
 
 
 
 
 
 
  73
  74	dev = PCI_SLOT(devfn);
  75	func = PCI_FUNC(devfn);
  76	reg = where & ~3;
  77	index = reg / 4;
  78
  79	/*
  80	 * While each channel has its own memory-mapped extended config
  81	 * space, it's generally only accessible when in endpoint mode.
  82	 * When in root complex mode, the controller is unable to target
  83	 * itself with either type 0 or type 1 accesses, and indeed, any
  84	 * controller initiated target transfer to its own config space
  85	 * result in a completer abort.
  86	 *
  87	 * Each channel effectively only supports a single device, but as
  88	 * the same channel <-> device access works for any PCI_SLOT()
  89	 * value, we cheat a bit here and bind the controller's config
  90	 * space to devfn 0 in order to enable self-enumeration. In this
  91	 * case the regular ECAR/ECDR path is sidelined and the mangled
  92	 * config access itself is initiated as an internal bus transaction.
  93	 */
  94	if (pci_is_root_bus(bus)) {
  95		if (dev != 0)
  96			return PCIBIOS_DEVICE_NOT_FOUND;
  97
  98		if (access_type == RCAR_PCI_ACCESS_READ)
  99			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 100		else
 101			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 102
 103		return PCIBIOS_SUCCESSFUL;
 104	}
 105
 106	/* Clear errors */
 107	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 108
 109	/* Set the PIO address */
 110	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
 111		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 112
 113	/* Enable the configuration access */
 114	if (pci_is_root_bus(bus->parent))
 115		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 116	else
 117		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 118
 119	/* Check for errors */
 120	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 121		return PCIBIOS_DEVICE_NOT_FOUND;
 122
 123	/* Check for master and target aborts */
 124	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
 125		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 126		return PCIBIOS_DEVICE_NOT_FOUND;
 127
 128	if (access_type == RCAR_PCI_ACCESS_READ)
 129		*data = rcar_pci_read_reg(pcie, PCIECDR);
 130	else
 131		rcar_pci_write_reg(pcie, *data, PCIECDR);
 132
 133	/* Disable the configuration access */
 134	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 135
 136	return PCIBIOS_SUCCESSFUL;
 137}
 138
 139static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 140			       int where, int size, u32 *val)
 141{
 142	struct rcar_pcie_host *host = bus->sysdata;
 143	int ret;
 144
 145	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 146				      bus, devfn, where, val);
 147	if (ret != PCIBIOS_SUCCESSFUL) {
 148		*val = 0xffffffff;
 149		return ret;
 150	}
 151
 152	if (size == 1)
 153		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
 154	else if (size == 2)
 155		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
 156
 157	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 158		bus->number, devfn, where, size, *val);
 159
 160	return ret;
 161}
 162
 163/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
 164static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 165				int where, int size, u32 val)
 166{
 167	struct rcar_pcie_host *host = bus->sysdata;
 168	unsigned int shift;
 169	u32 data;
 170	int ret;
 171
 172	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
 173				      bus, devfn, where, &data);
 174	if (ret != PCIBIOS_SUCCESSFUL)
 175		return ret;
 176
 177	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
 178		bus->number, devfn, where, size, val);
 179
 180	if (size == 1) {
 181		shift = BITS_PER_BYTE * (where & 3);
 182		data &= ~(0xff << shift);
 183		data |= ((val & 0xff) << shift);
 184	} else if (size == 2) {
 185		shift = BITS_PER_BYTE * (where & 2);
 186		data &= ~(0xffff << shift);
 187		data |= ((val & 0xffff) << shift);
 188	} else
 189		data = val;
 190
 191	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
 192				      bus, devfn, where, &data);
 193
 194	return ret;
 195}
 196
 197static struct pci_ops rcar_pcie_ops = {
 198	.read	= rcar_pcie_read_conf,
 199	.write	= rcar_pcie_write_conf,
 200};
 201
 202static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 203{
 204	struct device *dev = pcie->dev;
 205	unsigned int timeout = 1000;
 206	u32 macsr;
 207
 208	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
 209		return;
 210
 211	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
 212		dev_err(dev, "Speed change already in progress\n");
 213		return;
 214	}
 215
 216	macsr = rcar_pci_read_reg(pcie, MACSR);
 217	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
 218		goto done;
 219
 220	/* Set target link speed to 5.0 GT/s */
 221	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
 222		   PCI_EXP_LNKSTA_CLS_5_0GB);
 223
 224	/* Set speed change reason as intentional factor */
 225	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
 226
 227	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
 228	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
 229		rcar_pci_write_reg(pcie, macsr, MACSR);
 230
 231	/* Start link speed change */
 232	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
 233
 234	while (timeout--) {
 235		macsr = rcar_pci_read_reg(pcie, MACSR);
 236		if (macsr & SPCHGFIN) {
 237			/* Clear the interrupt bits */
 238			rcar_pci_write_reg(pcie, macsr, MACSR);
 239
 240			if (macsr & SPCHGFAIL)
 241				dev_err(dev, "Speed change failed\n");
 242
 243			goto done;
 244		}
 245
 246		msleep(1);
 247	}
 248
 249	dev_err(dev, "Speed change timed out\n");
 250
 251done:
 252	dev_info(dev, "Current link speed is %s GT/s\n",
 253		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 254}
 255
 256static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
 257{
 258	struct rcar_pcie *pcie = &host->pcie;
 259	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 260	struct resource_entry *win;
 261	LIST_HEAD(res);
 262	int i = 0;
 263
 264	/* Try setting 5 GT/s link speed */
 265	rcar_pcie_force_speedup(pcie);
 266
 267	/* Setup PCI resources */
 268	resource_list_for_each_entry(win, &bridge->windows) {
 269		struct resource *res = win->res;
 270
 271		if (!res->flags)
 272			continue;
 273
 274		switch (resource_type(res)) {
 275		case IORESOURCE_IO:
 276		case IORESOURCE_MEM:
 277			rcar_pcie_set_outbound(pcie, i, win);
 278			i++;
 279			break;
 280		}
 281	}
 282}
 283
 284static int rcar_pcie_enable(struct rcar_pcie_host *host)
 285{
 286	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 287
 288	rcar_pcie_hw_enable(host);
 289
 290	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 291
 292	bridge->sysdata = host;
 293	bridge->ops = &rcar_pcie_ops;
 294
 295	return pci_host_probe(bridge);
 296}
 297
 298static int phy_wait_for_ack(struct rcar_pcie *pcie)
 299{
 300	struct device *dev = pcie->dev;
 301	unsigned int timeout = 100;
 302
 303	while (timeout--) {
 304		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 305			return 0;
 306
 307		udelay(100);
 308	}
 309
 310	dev_err(dev, "Access to PCIe phy timed out\n");
 311
 312	return -ETIMEDOUT;
 313}
 314
 315static void phy_write_reg(struct rcar_pcie *pcie,
 316			  unsigned int rate, u32 addr,
 317			  unsigned int lane, u32 data)
 318{
 319	u32 phyaddr;
 320
 321	phyaddr = WRITE_CMD |
 322		((rate & 1) << RATE_POS) |
 323		((lane & 0xf) << LANE_POS) |
 324		((addr & 0xff) << ADR_POS);
 325
 326	/* Set write data */
 327	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
 328	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 329
 330	/* Ignore errors as they will be dealt with if the data link is down */
 331	phy_wait_for_ack(pcie);
 332
 333	/* Clear command */
 334	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
 335	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 336
 337	/* Ignore errors as they will be dealt with if the data link is down */
 338	phy_wait_for_ack(pcie);
 339}
 340
 341static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 342{
 343	int err;
 344
 345	/* Begin initialization */
 346	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 347
 348	/* Set mode */
 349	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 350
 351	err = rcar_pcie_wait_for_phyrdy(pcie);
 352	if (err)
 353		return err;
 354
 355	/*
 356	 * Initial header for port config space is type 1, set the device
 357	 * class to match. Hardware takes care of propagating the IDSETR
 358	 * settings, so there is no need to bother with a quirk.
 359	 */
 360	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 361
 362	/*
 363	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
 364	 * they aren't used, to avoid bridge being detected as broken.
 365	 */
 366	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
 367	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 368
 369	/* Initialize default capabilities. */
 370	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 371	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 372		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 373	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 374		PCI_HEADER_TYPE_BRIDGE);
 375
 376	/* Enable data link layer active state reporting */
 377	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
 378		PCI_EXP_LNKCAP_DLLLARC);
 379
 380	/* Write out the physical slot number = 0 */
 381	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 382
 383	/* Set the completion timer timeout to the maximum 50ms. */
 384	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 385
 386	/* Terminate list of capabilities (Next Capability Offset=0) */
 387	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 388
 389	/* Enable MSI */
 390	if (IS_ENABLED(CONFIG_PCI_MSI))
 391		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 392
 393	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
 394
 395	/* Finish initialization - establish a PCI Express link */
 396	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 397
 398	/* This will timeout if we don't have a link. */
 399	err = rcar_pcie_wait_for_dl(pcie);
 400	if (err)
 401		return err;
 402
 403	/* Enable INTx interrupts */
 404	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 405
 406	wmb();
 407
 408	return 0;
 409}
 410
 411static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
 412{
 413	struct rcar_pcie *pcie = &host->pcie;
 414
 415	/* Initialize the phy */
 416	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 417	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
 418	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
 419	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
 420	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
 421	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
 422	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
 423	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
 424	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
 425	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
 426	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
 427	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
 428
 429	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
 430	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 431	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 432
 433	return 0;
 434}
 435
 436static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
 437{
 438	struct rcar_pcie *pcie = &host->pcie;
 439
 440	/*
 441	 * These settings come from the R-Car Series, 2nd Generation User's
 442	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
 443	 */
 444	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
 445	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
 446	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 447	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 448
 449	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
 450	/* The following value is for DC connection, no termination resistor */
 451	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
 452	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 453	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 454
 455	return 0;
 456}
 457
 458static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
 459{
 460	int err;
 461
 462	err = phy_init(host->phy);
 463	if (err)
 464		return err;
 465
 466	err = phy_power_on(host->phy);
 467	if (err)
 468		phy_exit(host->phy);
 469
 470	return err;
 471}
 472
 473static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 474{
 475	struct rcar_pcie_host *host = data;
 476	struct rcar_pcie *pcie = &host->pcie;
 477	struct rcar_msi *msi = &host->msi;
 478	struct device *dev = pcie->dev;
 479	unsigned long reg;
 480
 481	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 482
 483	/* MSI & INTx share an interrupt - we only handle MSI here */
 484	if (!reg)
 485		return IRQ_NONE;
 486
 487	while (reg) {
 488		unsigned int index = find_first_bit(&reg, 32);
 489		unsigned int msi_irq;
 490
 491		msi_irq = irq_find_mapping(msi->domain->parent, index);
 492		if (msi_irq) {
 493			generic_handle_irq(msi_irq);
 494		} else {
 495			/* Unknown MSI, just clear it */
 496			dev_dbg(dev, "unexpected MSI\n");
 497			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
 498		}
 499
 500		/* see if there's any more pending in this vector */
 501		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 502	}
 503
 504	return IRQ_HANDLED;
 505}
 506
 507static void rcar_msi_top_irq_ack(struct irq_data *d)
 508{
 509	irq_chip_ack_parent(d);
 510}
 511
 512static void rcar_msi_top_irq_mask(struct irq_data *d)
 513{
 514	pci_msi_mask_irq(d);
 515	irq_chip_mask_parent(d);
 516}
 517
 518static void rcar_msi_top_irq_unmask(struct irq_data *d)
 519{
 520	pci_msi_unmask_irq(d);
 521	irq_chip_unmask_parent(d);
 522}
 523
 524static struct irq_chip rcar_msi_top_chip = {
 525	.name		= "PCIe MSI",
 526	.irq_ack	= rcar_msi_top_irq_ack,
 527	.irq_mask	= rcar_msi_top_irq_mask,
 528	.irq_unmask	= rcar_msi_top_irq_unmask,
 529};
 530
 531static void rcar_msi_irq_ack(struct irq_data *d)
 532{
 533	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 534	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 535
 536	/* clear the interrupt */
 537	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
 538}
 539
 540static void rcar_msi_irq_mask(struct irq_data *d)
 541{
 542	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 543	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 544	unsigned long flags;
 545	u32 value;
 546
 547	spin_lock_irqsave(&msi->mask_lock, flags);
 548	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 549	value &= ~BIT(d->hwirq);
 550	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 551	spin_unlock_irqrestore(&msi->mask_lock, flags);
 552}
 553
 554static void rcar_msi_irq_unmask(struct irq_data *d)
 555{
 556	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
 557	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 558	unsigned long flags;
 559	u32 value;
 560
 561	spin_lock_irqsave(&msi->mask_lock, flags);
 562	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
 563	value |= BIT(d->hwirq);
 564	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
 565	spin_unlock_irqrestore(&msi->mask_lock, flags);
 566}
 567
 568static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
 569{
 570	return -EINVAL;
 571}
 572
 573static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 574{
 575	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
 576	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 577
 578	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
 579	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 580	msg->data = data->hwirq;
 581}
 582
 583static struct irq_chip rcar_msi_bottom_chip = {
 584	.name			= "Rcar MSI",
 585	.irq_ack		= rcar_msi_irq_ack,
 586	.irq_mask		= rcar_msi_irq_mask,
 587	.irq_unmask		= rcar_msi_irq_unmask,
 588	.irq_set_affinity 	= rcar_msi_set_affinity,
 589	.irq_compose_msi_msg	= rcar_compose_msi_msg,
 590};
 591
 592static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 593				  unsigned int nr_irqs, void *args)
 594{
 595	struct rcar_msi *msi = domain->host_data;
 596	unsigned int i;
 597	int hwirq;
 598
 599	mutex_lock(&msi->map_lock);
 600
 601	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
 602
 603	mutex_unlock(&msi->map_lock);
 604
 605	if (hwirq < 0)
 606		return -ENOSPC;
 607
 608	for (i = 0; i < nr_irqs; i++)
 609		irq_domain_set_info(domain, virq + i, hwirq + i,
 610				    &rcar_msi_bottom_chip, domain->host_data,
 611				    handle_edge_irq, NULL, NULL);
 612
 613	return 0;
 614}
 615
 616static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 617				  unsigned int nr_irqs)
 618{
 619	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 620	struct rcar_msi *msi = domain->host_data;
 621
 622	mutex_lock(&msi->map_lock);
 623
 624	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
 625
 626	mutex_unlock(&msi->map_lock);
 627}
 628
 629static const struct irq_domain_ops rcar_msi_domain_ops = {
 630	.alloc	= rcar_msi_domain_alloc,
 631	.free	= rcar_msi_domain_free,
 632};
 633
 634static struct msi_domain_info rcar_msi_info = {
 635	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 636		   MSI_FLAG_MULTI_PCI_MSI),
 637	.chip	= &rcar_msi_top_chip,
 638};
 639
 640static int rcar_allocate_domains(struct rcar_msi *msi)
 641{
 642	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
 643	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
 644	struct irq_domain *parent;
 645
 646	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
 647					  &rcar_msi_domain_ops, msi);
 648	if (!parent) {
 649		dev_err(pcie->dev, "failed to create IRQ domain\n");
 650		return -ENOMEM;
 651	}
 652	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
 653
 654	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
 655	if (!msi->domain) {
 656		dev_err(pcie->dev, "failed to create MSI domain\n");
 657		irq_domain_remove(parent);
 658		return -ENOMEM;
 659	}
 660
 661	return 0;
 662}
 663
 664static void rcar_free_domains(struct rcar_msi *msi)
 665{
 666	struct irq_domain *parent = msi->domain->parent;
 667
 668	irq_domain_remove(msi->domain);
 669	irq_domain_remove(parent);
 670}
 671
 672static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
 673{
 674	struct rcar_pcie *pcie = &host->pcie;
 675	struct device *dev = pcie->dev;
 676	struct rcar_msi *msi = &host->msi;
 677	struct resource res;
 678	int err;
 679
 680	mutex_init(&msi->map_lock);
 681	spin_lock_init(&msi->mask_lock);
 682
 683	err = of_address_to_resource(dev->of_node, 0, &res);
 684	if (err)
 685		return err;
 686
 687	err = rcar_allocate_domains(msi);
 688	if (err)
 689		return err;
 690
 691	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
 692	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 693			       IRQF_SHARED | IRQF_NO_THREAD,
 694			       rcar_msi_bottom_chip.name, host);
 695	if (err < 0) {
 696		dev_err(dev, "failed to request IRQ: %d\n", err);
 697		goto err;
 698	}
 699
 700	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 701			       IRQF_SHARED | IRQF_NO_THREAD,
 702			       rcar_msi_bottom_chip.name, host);
 703	if (err < 0) {
 704		dev_err(dev, "failed to request IRQ: %d\n", err);
 705		goto err;
 706	}
 707
 708	/* disable all MSIs */
 709	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 710
 711	/*
 712	 * Set up the MSI data target using the RC base address, which is
 713	 * guaranteed to be in the low 32-bit range on any R-Car HW.
 714	 */
 715	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 716	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 717
 718	return 0;
 719
 720err:
 721	rcar_free_domains(msi);
 722	return err;
 723}
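
/*
 * The address written to PCIEMSIALR/PCIEMSIAUR is the controller's own
 * register base (resource 0 above) with MSIFE turning the address decode
 * on, so MSI memory writes from endpoints terminate in the RC instead of
 * RAM and are signalled through the two shared GIC interrupts requested
 * above; rcar_pcie_msi_irq() then demultiplexes them into the per-vector
 * Linux interrupts created by the MSI domain.
 */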
 724
 725static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
 726{
 727	struct rcar_pcie *pcie = &host->pcie;
 728
 729	/* Disable all MSI interrupts */
 730	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
 731
 732	/* Disable address decoding of the MSI target address (clear MSIFE) */
 733	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
 734
 735	rcar_free_domains(&host->msi);
 736}
 737
 738static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
 739{
 740	struct rcar_pcie *pcie = &host->pcie;
 741	struct device *dev = pcie->dev;
 742	struct resource res;
 743	int err, i;
 744
 745	host->phy = devm_phy_optional_get(dev, "pcie");
 746	if (IS_ERR(host->phy))
 747		return PTR_ERR(host->phy);
 748
 749	err = of_address_to_resource(dev->of_node, 0, &res);
 750	if (err)
 751		return err;
 752
 753	pcie->base = devm_ioremap_resource(dev, &res);
 754	if (IS_ERR(pcie->base))
 755		return PTR_ERR(pcie->base);
 756
 757	host->bus_clk = devm_clk_get(dev, "pcie_bus");
 758	if (IS_ERR(host->bus_clk)) {
 759		dev_err(dev, "cannot get pcie bus clock\n");
 760		return PTR_ERR(host->bus_clk);
 761	}
 762
 763	i = irq_of_parse_and_map(dev->of_node, 0);
 764	if (!i) {
 765		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 766		err = -ENOENT;
 767		goto err_irq1;
 768	}
 769	host->msi.irq1 = i;
 770
 771	i = irq_of_parse_and_map(dev->of_node, 1);
 772	if (!i) {
 773		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 774		err = -ENOENT;
 775		goto err_irq2;
 776	}
 777	host->msi.irq2 = i;
 778
 779	return 0;
 780
 781err_irq2:
 782	irq_dispose_mapping(host->msi.irq1);
 783err_irq1:
 784	return err;
 785}
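
/*
 * Everything rcar_pcie_get_resources() consumes comes straight from the
 * controller's DT node: the first "reg" region (ioremapped here and reused
 * later as the MSI target address), the interrupts at index 0 and 1
 * (msi.irq1/msi.irq2), the clock named "pcie_bus" and an optional PHY named
 * "pcie".  The renesas,pcie-rcar-gen* bindings document the full property
 * list; the names above are only the subset this function looks up.
 */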
 786
 787static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 788				    struct resource_entry *entry,
 789				    int *index)
 790{
 791	u64 restype = entry->res->flags;
 792	u64 cpu_addr = entry->res->start;
 793	u64 cpu_end = entry->res->end;
 794	u64 pci_addr = entry->res->start - entry->offset;
 795	u32 flags = LAM_64BIT | LAR_ENABLE;
 796	u64 mask;
 797	u64 size = resource_size(entry->res);
 798	int idx = *index;
 799
 800	if (restype & IORESOURCE_PREFETCH)
 801		flags |= LAM_PREFETCH;
 802
 803	while (cpu_addr < cpu_end) {
 804		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
 805			dev_err(pcie->dev, "Failed to map inbound regions!\n");
 806			return -EINVAL;
 807		}
 808		/*
 809		 * If the size of the range is larger than the alignment of
 810		 * the start address, we have to use multiple entries to
 811		 * perform the mapping.
 812		 */
 813		if (cpu_addr > 0) {
 814			unsigned long nr_zeros = __ffs64(cpu_addr);
 815			u64 alignment = 1ULL << nr_zeros;
 816
 817			size = min(size, alignment);
 818		}
 819		/* Hardware supports max 4GiB inbound region */
 820		size = min(size, 1ULL << 32);
 821
 822		mask = roundup_pow_of_two(size) - 1;
 823		mask &= ~0xf;
 824
 825		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
 826				      lower_32_bits(mask) | flags, idx, true);
 827
 828		pci_addr += size;
 829		cpu_addr += size;
 830		idx += 2;
 831	}
 832	*index = idx;
 833
 834	return 0;
 835}
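
/*
 * Worked example: a dma-range starting at CPU address 0x40000000 with a
 * size of 2 GiB has __ffs64(0x40000000) = 30, i.e. a natural alignment of
 * 1 GiB, so the first iteration programs a 1 GiB window at 0x40000000
 * (mask = roundup_pow_of_two(0x40000000) - 1 = 0x3fffffff, low nibble
 * cleared) and the second programs another 1 GiB window at 0x80000000.
 * idx advances by two per window because rcar_pcie_set_inbound() uses a
 * pair of register slots for the lower and upper 32 bits of each mapping.
 */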
 836
 837static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
 838{
 839	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
 840	struct resource_entry *entry;
 841	int index = 0, err = 0;
 842
 843	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 844		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
 845		if (err)
 846			break;
 847	}
 848
 849	return err;
 850}
 851
 852static const struct of_device_id rcar_pcie_of_match[] = {
 853	{ .compatible = "renesas,pcie-r8a7779",
 854	  .data = rcar_pcie_phy_init_h1 },
 855	{ .compatible = "renesas,pcie-r8a7790",
 856	  .data = rcar_pcie_phy_init_gen2 },
 857	{ .compatible = "renesas,pcie-r8a7791",
 858	  .data = rcar_pcie_phy_init_gen2 },
 859	{ .compatible = "renesas,pcie-rcar-gen2",
 860	  .data = rcar_pcie_phy_init_gen2 },
 861	{ .compatible = "renesas,pcie-r8a7795",
 862	  .data = rcar_pcie_phy_init_gen3 },
 863	{ .compatible = "renesas,pcie-rcar-gen3",
 864	  .data = rcar_pcie_phy_init_gen3 },
 865	{},
 866};
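
/*
 * The .data pointer of each entry is the per-SoC PHY initialization
 * callback; probe retrieves it with of_device_get_match_data() and stores
 * it in host->phy_init_fn so that both probe and resume can bring the PHY
 * back up the same way.
 */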
 867
 868static int rcar_pcie_probe(struct platform_device *pdev)
 869{
 870	struct device *dev = &pdev->dev;
 871	struct rcar_pcie_host *host;
 872	struct rcar_pcie *pcie;
 873	u32 data;
 874	int err;
 875	struct pci_host_bridge *bridge;
 876
 877	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 878	if (!bridge)
 879		return -ENOMEM;
 880
 881	host = pci_host_bridge_priv(bridge);
 882	pcie = &host->pcie;
 883	pcie->dev = dev;
 884	platform_set_drvdata(pdev, host);
 885
 886	pm_runtime_enable(pcie->dev);
 887	err = pm_runtime_get_sync(pcie->dev);
 888	if (err < 0) {
 889		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
 890		goto err_pm_put;
 891	}
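
	/*
	 * pm_runtime_get_sync() increments the device's usage count even
	 * when it fails, which is why this error path (and every later one
	 * that falls through to err_pm_put) undoes it with pm_runtime_put()
	 * instead of returning directly.
	 */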
 892
 893	err = rcar_pcie_get_resources(host);
 894	if (err < 0) {
 895		dev_err(dev, "failed to request resources: %d\n", err);
 896		goto err_pm_put;
 897	}
 898
 899	err = clk_prepare_enable(host->bus_clk);
 900	if (err) {
 901		dev_err(dev, "failed to enable bus clock: %d\n", err);
 902		goto err_unmap_msi_irqs;
 903	}
 904
 905	err = rcar_pcie_parse_map_dma_ranges(host);
 906	if (err)
 907		goto err_clk_disable;
 908
 909	host->phy_init_fn = of_device_get_match_data(dev);
 910	err = host->phy_init_fn(host);
 911	if (err) {
 912		dev_err(dev, "failed to init PCIe PHY\n");
 913		goto err_clk_disable;
 914	}
 915
 916	/* Failure to get a link might just be that no cards are inserted */
 917	if (rcar_pcie_hw_init(pcie)) {
 918		dev_info(dev, "PCIe link down\n");
 919		err = -ENODEV;
 920		goto err_phy_shutdown;
 921	}
 922
 923	data = rcar_pci_read_reg(pcie, MACSR);
 924	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 925
 926	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 927		err = rcar_pcie_enable_msi(host);
 928		if (err < 0) {
 929			dev_err(dev,
 930				"failed to enable MSI support: %d\n",
 931				err);
 932			goto err_phy_shutdown;
 933		}
 934	}
 935
 936	err = rcar_pcie_enable(host);
 937	if (err)
 938		goto err_msi_teardown;
 939
 940	return 0;
 941
 942err_msi_teardown:
 943	if (IS_ENABLED(CONFIG_PCI_MSI))
 944		rcar_pcie_teardown_msi(host);
 945
 946err_phy_shutdown:
 947	if (host->phy) {
 948		phy_power_off(host->phy);
 949		phy_exit(host->phy);
 950	}
 951
 952err_clk_disable:
 953	clk_disable_unprepare(host->bus_clk);
 954
 955err_unmap_msi_irqs:
 956	irq_dispose_mapping(host->msi.irq2);
 957	irq_dispose_mapping(host->msi.irq1);
 958
 959err_pm_put:
 960	pm_runtime_put(dev);
 961	pm_runtime_disable(dev);
 962
 963	return err;
 964}
 965
 966static int __maybe_unused rcar_pcie_resume(struct device *dev)
 967{
 968	struct rcar_pcie_host *host = dev_get_drvdata(dev);
 969	struct rcar_pcie *pcie = &host->pcie;
 970	unsigned int data;
 971	int err;
 972
 973	err = rcar_pcie_parse_map_dma_ranges(host);
 974	if (err)
 975		return 0;
 976
 977	/* Failure to get a link might just be that no cards are inserted */
 978	err = host->phy_init_fn(host);
 979	if (err) {
 980		dev_info(dev, "PCIe link down\n");
 981		return 0;
 982	}
 983
 984	data = rcar_pci_read_reg(pcie, MACSR);
 985	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 986
 987	/* Restore the MSI target address and re-enable the vectors in use */
 988	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 989		struct resource res;
 990		u32 val;
 991
 992		of_address_to_resource(dev->of_node, 0, &res);
 993		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
 994		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
 995
 996		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
 997		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
 998	}
 999
1000	rcar_pcie_hw_enable(host);
1001
1002	return 0;
1003}
1004
1005static int rcar_pcie_resume_noirq(struct device *dev)
1006{
1007	struct rcar_pcie_host *host = dev_get_drvdata(dev);
1008	struct rcar_pcie *pcie = &host->pcie;
1009
1010	if (rcar_pci_read_reg(pcie, PMSR) &&
1011	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1012		return 0;
1013
1014	/* Re-establish the PCIe link */
1015	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
1016	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1017	return rcar_pcie_wait_for_dl(pcie);
1018}
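
/*
 * The early return above covers the case where the controller kept its
 * state across suspend: PMSR still reads back non-zero and PCIETCTLR does
 * not report DL_DOWN, so there is nothing to redo.  Otherwise MACCTLR is
 * reprogrammed with its initial value, CFINIT restarts the link bring-up
 * and rcar_pcie_wait_for_dl() polls until the data link is active again.
 */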
1019
1020static const struct dev_pm_ops rcar_pcie_pm_ops = {
1021	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
1022	.resume_noirq = rcar_pcie_resume_noirq,
1023};
1024
1025static struct platform_driver rcar_pcie_driver = {
1026	.driver = {
1027		.name = "rcar-pcie",
1028		.of_match_table = rcar_pcie_of_match,
1029		.pm = &rcar_pcie_pm_ops,
1030		.suppress_bind_attrs = true,
1031	},
1032	.probe = rcar_pcie_probe,
1033};
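
/*
 * The driver is registered with builtin_platform_driver() and provides no
 * .remove callback; with suppress_bind_attrs set, it also cannot be
 * unbound through sysfs once probed.
 */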
1034builtin_platform_driver(rcar_pcie_driver);