// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

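/*
 * Sentinel values for ->irq_pci_addr below: the dedicated IRQ outbound
 * region is either not configured yet, or configured for legacy IRQ
 * messages. Real MSI mappings store the 256-byte-aligned PCI address
 * itself, which can never collide with these values.
 */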
#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses on the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct cdns_pcie_ep {
	struct cdns_pcie		pcie;
	u32				max_regions;
	unsigned long			ob_region_map;
	phys_addr_t			*ob_addr;
	phys_addr_t			irq_phys_addr;
	void __iomem			*irq_cpu_addr;
	u64				irq_pci_addr;
	u8				irq_pci_fn;
	u8				irq_pending;
};

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0; all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
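		/* Sizes above 2 GiB cannot fit in a 32-bit BAR: go 64-bit. */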
		bool is_64bits = sz > SZ_2G;

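		/* A 64-bit BAR uses a BAR pair and must start on an even BAR. */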
		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

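	/* BARs 0-3 are configured through CFG0, BARs 4-5 through CFG1. */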
	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

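	/*
	 * ob_region_map is a single unsigned long, so search at most
	 * BITS_PER_LONG bits; region 0 is reserved for IRQs at probe time,
	 * so the first free bit is at least 1.
	 */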
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield in the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
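	/* MMC is bits 3:1 of Message Control, hence the shift by 1. */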
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

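	/* The host has enabled 2^mme vectors. */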
	return mme;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 offset;
	u16 status;
	u8 msg_code;

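	/* Only INTA-INTD exist: reduce the line number modulo 4. */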
	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

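	/*
	 * PCI_STATUS_INTERRUPT must mirror whether any INTx is still
	 * pending: toggle it only when the two disagree.
	 */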
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}

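	/*
	 * The message routing and code are encoded in the AXI address
	 * offset within the dedicated region; the written value itself is
	 * ignored (CDNS_PCIE_MSG_NO_DATA).
	 */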
	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 * in drivers/pci/dwc/pci-dra7xx.c
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to write the data to. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
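	/* The Message Address is DWORD-aligned: bits [1:0] are reserved. */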
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
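		/* Legacy IRQs are always raised as INTA (intx 0). */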
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf *epf;
	u32 cfg;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &cdns_pcie_epc_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

static const struct of_device_id cdns_pcie_ep_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-ep" },
	{ },
};

static int cdns_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cdns_pcie_ep *ep;
	struct cdns_pcie *pcie;
	struct pci_epc *epc;
	struct resource *res;
	int ret;
	int phy_count;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->is_rc = false;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		ret = PTR_ERR(epc);
		goto err_init;
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res));
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_init;
	}

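	/* Reserve a 128K window to back outbound region 0 (IRQ TLPs). */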
	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}

static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);

	cdns_pcie_disable_phy(pcie);
}

static struct platform_driver cdns_pcie_ep_driver = {
	.driver = {
		.name = "cdns-pcie-ep",
		.of_match_table = cdns_pcie_ep_of_match,
		.pm	= &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_ep_probe,
	.shutdown = cdns_pcie_ep_shutdown,
};
builtin_platform_driver(cdns_pcie_ep_driver);