drivers/pci/controller/pcie-rockchip-ep.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Rockchip AXI PCIe endpoint controller driver
  4 *
  5 * Copyright (c) 2018 Rockchip, Inc.
  6 *
  7 * Author: Shawn Lin <shawn.lin@rock-chips.com>
  8 *         Simon Xue <xxm@rock-chips.com>
  9 */
 10
 11#include <linux/configfs.h>
 12#include <linux/delay.h>
 13#include <linux/kernel.h>
 14#include <linux/of.h>
 15#include <linux/pci-epc.h>
 16#include <linux/platform_device.h>
 17#include <linux/pci-epf.h>
 18#include <linux/sizes.h>
 19
 20#include "pcie-rockchip.h"
 21
 22/**
 23 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 24 * @rockchip: Rockchip PCIe controller
 25 * @epc: PCI EPC device
 26 * @max_regions: maximum number of regions supported by hardware
 27 * @ob_region_map: bitmask of mapped outbound regions
 28 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 29 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 30 *		   dedicated outbound region is mapped.
 31 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 32 *		  the sending of a memory write (MSI) / normal message (INTX
 33 *		  IRQ) TLP through the PCIe bus.
 34 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 35 *		  dedicated outbound region.
 36 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 37 *		the MSI/INTX IRQ dedicated outbound region.
 38 * @irq_pending: bitmask of asserted INTX IRQs.
 39 */
 40struct rockchip_pcie_ep {
 41	struct rockchip_pcie	rockchip;
 42	struct pci_epc		*epc;
 43	u32			max_regions;
 44	unsigned long		ob_region_map;
 45	phys_addr_t		*ob_addr;
 46	phys_addr_t		irq_phys_addr;
 47	void __iomem		*irq_cpu_addr;
 48	u64			irq_pci_addr;
 49	u8			irq_pci_fn;
 50	u8			irq_pending;
 51};
 52
 53static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
 54					  u32 region)
 55{
 56	rockchip_pcie_write(rockchip, 0,
 57			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
 58	rockchip_pcie_write(rockchip, 0,
 59			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
 60	rockchip_pcie_write(rockchip, 0,
 61			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
 62	rockchip_pcie_write(rockchip, 0,
 63			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
 64}
 65
 66static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
 67					 u32 r, u64 cpu_addr, u64 pci_addr,
 68					 size_t size)
 69{
 70	int num_pass_bits = fls64(size - 1);
 71	u32 addr0, addr1, desc0;
 72
 73	if (num_pass_bits < 8)
 74		num_pass_bits = 8;
 75
 76	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
 77		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
 78	addr1 = upper_32_bits(pci_addr);
 79	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;
 80
 81	/* PCI bus address region */
 82	rockchip_pcie_write(rockchip, addr0,
 83			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
 84	rockchip_pcie_write(rockchip, addr1,
 85			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
 86	rockchip_pcie_write(rockchip, desc0,
 87			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
 88	rockchip_pcie_write(rockchip, 0,
 89			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
 90}
 91
 92static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
 93					 struct pci_epf_header *hdr)
 94{
 95	u32 reg;
 96	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 97	struct rockchip_pcie *rockchip = &ep->rockchip;
 98
 99	/* All functions share the same vendor ID with function 0 */
100	if (fn == 0) {
101		u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
102			       (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
103
104		rockchip_pcie_write(rockchip, vid_regs,
105				    PCIE_CORE_CONFIG_VENDOR);
106	}
107
108	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
109	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
110	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
111
112	rockchip_pcie_write(rockchip,
113			    hdr->revid |
114			    hdr->progif_code << 8 |
115			    hdr->subclass_code << 16 |
116			    hdr->baseclass_code << 24,
117			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
118	rockchip_pcie_write(rockchip, hdr->cache_line_size,
119			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
120			    PCI_CACHE_LINE_SIZE);
121	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
122			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
123			    PCI_SUBSYSTEM_VENDOR_ID);
124	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
125			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
126			    PCI_INTERRUPT_LINE);
127
128	return 0;
129}
130
131static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
132				    struct pci_epf_bar *epf_bar)
133{
134	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
135	struct rockchip_pcie *rockchip = &ep->rockchip;
136	dma_addr_t bar_phys = epf_bar->phys_addr;
137	enum pci_barno bar = epf_bar->barno;
138	int flags = epf_bar->flags;
139	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
140	u64 sz;
141
142	/* BAR size is 2^(aperture + 7) */
143	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
144
145	/*
146	 * roundup_pow_of_two() returns an unsigned long, which is not suited
147	 * for 64bit values.
148	 */
149	sz = 1ULL << fls64(sz - 1);
150	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
151
152	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
153		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
154	} else {
155		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
156		bool is_64bits = sz > SZ_2G;
157
158		if (is_64bits && (bar & 1))
159			return -EINVAL;
160
161		if (is_64bits && is_prefetch)
162			ctrl =
163			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
164		else if (is_prefetch)
165			ctrl =
166			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
167		else if (is_64bits)
168			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
169		else
170			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
171	}
172
173	if (bar < BAR_4) {
174		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
175		b = bar;
176	} else {
177		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
178		b = bar - BAR_4;
179	}
180
181	addr0 = lower_32_bits(bar_phys);
182	addr1 = upper_32_bits(bar_phys);
183
184	cfg = rockchip_pcie_read(rockchip, reg);
185	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
186		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
187	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
188		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
189
190	rockchip_pcie_write(rockchip, cfg, reg);
191	rockchip_pcie_write(rockchip, addr0,
192			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
193	rockchip_pcie_write(rockchip, addr1,
194			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
195
196	return 0;
197}
198
199static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
200				       struct pci_epf_bar *epf_bar)
201{
202	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
203	struct rockchip_pcie *rockchip = &ep->rockchip;
204	u32 reg, cfg, b, ctrl;
205	enum pci_barno bar = epf_bar->barno;
206
207	if (bar < BAR_4) {
208		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
209		b = bar;
210	} else {
211		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
212		b = bar - BAR_4;
213	}
214
215	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
216	cfg = rockchip_pcie_read(rockchip, reg);
217	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
218		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
219	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
220
221	rockchip_pcie_write(rockchip, cfg, reg);
222	rockchip_pcie_write(rockchip, 0x0,
223			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
224	rockchip_pcie_write(rockchip, 0x0,
225			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
226}
227
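/*
 * Outbound address space is split into 1 MiB windows; the window index is
 * taken from AXI address bits [24:20], giving at most 32 regions.
 */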
228static inline u32 rockchip_ob_region(phys_addr_t addr)
229{
230	return (addr >> ilog2(SZ_1M)) & 0x1f;
231}
232
233static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
234				     phys_addr_t addr, u64 pci_addr,
235				     size_t size)
236{
237	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
238	struct rockchip_pcie *pcie = &ep->rockchip;
239	u32 r = rockchip_ob_region(addr);
240
241	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
242
243	set_bit(r, &ep->ob_region_map);
244	ep->ob_addr[r] = addr;
245
246	return 0;
247}
248
249static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
250					phys_addr_t addr)
251{
252	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
253	struct rockchip_pcie *rockchip = &ep->rockchip;
254	u32 r;
255
256	for (r = 0; r < ep->max_regions; r++)
257		if (ep->ob_addr[r] == addr)
258			break;
259
260	if (r == ep->max_regions)
261		return;
262
263	rockchip_pcie_clear_ep_ob_atu(rockchip, r);
264
265	ep->ob_addr[r] = 0;
266	clear_bit(r, &ep->ob_region_map);
267}
268
269static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
270				    u8 multi_msg_cap)
271{
272	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
273	struct rockchip_pcie *rockchip = &ep->rockchip;
274	u32 flags;
275
276	flags = rockchip_pcie_read(rockchip,
277				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
278				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
279	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
280	flags |=
281	   (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
282	   (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
283	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
284	rockchip_pcie_write(rockchip, flags,
285			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
286			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
287	return 0;
288}
289
290static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
291{
292	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
293	struct rockchip_pcie *rockchip = &ep->rockchip;
294	u32 flags;
295
296	flags = rockchip_pcie_read(rockchip,
297				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
298				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
299	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
300		return -EINVAL;
301
302	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
303			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
304}
305
306static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
307					 u8 intx, bool do_assert)
308{
309	struct rockchip_pcie *rockchip = &ep->rockchip;
310
311	intx &= 3;
312
313	if (do_assert) {
314		ep->irq_pending |= BIT(intx);
315		rockchip_pcie_write(rockchip,
316				    PCIE_CLIENT_INT_IN_ASSERT |
317				    PCIE_CLIENT_INT_PEND_ST_PEND,
318				    PCIE_CLIENT_LEGACY_INT_CTRL);
319	} else {
320		ep->irq_pending &= ~BIT(intx);
321		rockchip_pcie_write(rockchip,
322				    PCIE_CLIENT_INT_IN_DEASSERT |
323				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
324				    PCIE_CLIENT_LEGACY_INT_CTRL);
325	}
326}
327
328static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
329					  u8 intx)
330{
331	u16 cmd;
332
333	cmd = rockchip_pcie_read(&ep->rockchip,
334				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
335				 ROCKCHIP_PCIE_EP_CMD_STATUS);
336
337	if (cmd & PCI_COMMAND_INTX_DISABLE)
338		return -EINVAL;
339
340	/*
 341	 * The TRM vaguely states that a delay is needed between INTx toggles
 342	 * because it takes some AHB bus clock cycles to take effect, so add a
 343	 * generous 1 ms delay here.
344	 */
345	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
346	mdelay(1);
347	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
348	return 0;
349}
350
351static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
352					 u8 interrupt_num)
353{
354	struct rockchip_pcie *rockchip = &ep->rockchip;
355	u32 flags, mme, data, data_mask;
356	u8 msi_count;
357	u64 pci_addr;
358	u32 r;
359
360	/* Check MSI enable bit */
361	flags = rockchip_pcie_read(&ep->rockchip,
362				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
363				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
364	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
365		return -EINVAL;
366
367	/* Get MSI numbers from MME */
368	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
369			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
370	msi_count = 1 << mme;
371	if (!interrupt_num || interrupt_num > msi_count)
372		return -EINVAL;
373
374	/* Set MSI private data */
375	data_mask = msi_count - 1;
376	data = rockchip_pcie_read(rockchip,
377				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
378				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
379				  PCI_MSI_DATA_64);
380	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
381
382	/* Get MSI PCI address */
383	pci_addr = rockchip_pcie_read(rockchip,
384				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
385				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
386				      PCI_MSI_ADDRESS_HI);
387	pci_addr <<= 32;
388	pci_addr |= rockchip_pcie_read(rockchip,
389				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
390				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
391				       PCI_MSI_ADDRESS_LO);
392
393	/* Set the outbound region if needed. */
394	if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
395		     ep->irq_pci_fn != fn)) {
396		r = rockchip_ob_region(ep->irq_phys_addr);
397		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
398					     ep->irq_phys_addr,
399					     pci_addr & PCIE_ADDR_MASK,
400					     ~PCIE_ADDR_MASK + 1);
401		ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
402		ep->irq_pci_fn = fn;
403	}
404
405	writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
406	return 0;
407}
408
409static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
410				      unsigned int type, u16 interrupt_num)
411{
412	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
413
414	switch (type) {
415	case PCI_IRQ_INTX:
416		return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
417	case PCI_IRQ_MSI:
418		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
419	default:
420		return -EINVAL;
421	}
422}
423
424static int rockchip_pcie_ep_start(struct pci_epc *epc)
425{
426	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
427	struct rockchip_pcie *rockchip = &ep->rockchip;
428	struct pci_epf *epf;
429	u32 cfg;
430
431	cfg = BIT(0);
432	list_for_each_entry(epf, &epc->pci_epf, list)
433		cfg |= BIT(epf->func_no);
434
435	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
436
437	return 0;
438}
439
440static const struct pci_epc_features rockchip_pcie_epc_features = {
441	.linkup_notifier = false,
442	.msi_capable = true,
443	.msix_capable = false,
444	.align = 256,
445};
446
447static const struct pci_epc_features*
448rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
449{
450	return &rockchip_pcie_epc_features;
451}
452
453static const struct pci_epc_ops rockchip_pcie_epc_ops = {
454	.write_header	= rockchip_pcie_ep_write_header,
455	.set_bar	= rockchip_pcie_ep_set_bar,
456	.clear_bar	= rockchip_pcie_ep_clear_bar,
457	.map_addr	= rockchip_pcie_ep_map_addr,
458	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
459	.set_msi	= rockchip_pcie_ep_set_msi,
460	.get_msi	= rockchip_pcie_ep_get_msi,
461	.raise_irq	= rockchip_pcie_ep_raise_irq,
462	.start		= rockchip_pcie_ep_start,
463	.get_features	= rockchip_pcie_ep_get_features,
464};
465
466static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
467				     struct rockchip_pcie_ep *ep)
468{
469	struct device *dev = rockchip->dev;
470	int err;
471
472	err = rockchip_pcie_parse_dt(rockchip);
473	if (err)
474		return err;
475
476	err = rockchip_pcie_get_phys(rockchip);
477	if (err)
478		return err;
479
480	err = of_property_read_u32(dev->of_node,
481				   "rockchip,max-outbound-regions",
482				   &ep->max_regions);
483	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
484		ep->max_regions = MAX_REGION_LIMIT;
485
486	ep->ob_region_map = 0;
487
488	err = of_property_read_u8(dev->of_node, "max-functions",
489				  &ep->epc->max_functions);
490	if (err < 0)
491		ep->epc->max_functions = 1;
492
493	return 0;
494}
495
496static const struct of_device_id rockchip_pcie_ep_of_match[] = {
497	{ .compatible = "rockchip,rk3399-pcie-ep"},
498	{},
499};
500
501static int rockchip_pcie_ep_probe(struct platform_device *pdev)
502{
503	struct device *dev = &pdev->dev;
504	struct rockchip_pcie_ep *ep;
505	struct rockchip_pcie *rockchip;
506	struct pci_epc *epc;
507	size_t max_regions;
508	struct pci_epc_mem_window *windows = NULL;
509	int err, i;
510	u32 cfg_msi, cfg_msix_cp;
511
512	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
513	if (!ep)
514		return -ENOMEM;
515
516	rockchip = &ep->rockchip;
517	rockchip->is_rc = false;
518	rockchip->dev = dev;
519
520	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
521	if (IS_ERR(epc)) {
522		dev_err(dev, "failed to create epc device\n");
523		return PTR_ERR(epc);
524	}
525
526	ep->epc = epc;
527	epc_set_drvdata(epc, ep);
528
529	err = rockchip_pcie_parse_ep_dt(rockchip, ep);
530	if (err)
531		return err;
532
533	err = rockchip_pcie_enable_clocks(rockchip);
534	if (err)
535		return err;
536
537	err = rockchip_pcie_init_port(rockchip);
538	if (err)
539		goto err_disable_clocks;
540
541	/* Establish the link automatically */
542	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
543			    PCIE_CLIENT_CONFIG);
544
545	max_regions = ep->max_regions;
546	ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
547				   GFP_KERNEL);
548
549	if (!ep->ob_addr) {
550		err = -ENOMEM;
551		goto err_uninit_port;
552	}
553
554	/* Only enable function 0 by default */
555	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
556
557	windows = devm_kcalloc(dev, ep->max_regions,
558			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
559	if (!windows) {
560		err = -ENOMEM;
561		goto err_uninit_port;
562	}
563	for (i = 0; i < ep->max_regions; i++) {
564		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
565		windows[i].size = SZ_1M;
566		windows[i].page_size = SZ_1M;
567	}
568	err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
569	devm_kfree(dev, windows);
570
571	if (err < 0) {
572		dev_err(dev, "failed to initialize the memory space\n");
573		goto err_uninit_port;
574	}
575
576	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
577						  SZ_1M);
578	if (!ep->irq_cpu_addr) {
579		dev_err(dev, "failed to reserve memory space for MSI\n");
580		err = -ENOMEM;
581		goto err_epc_mem_exit;
582	}
583
584	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
585
586	/*
587	 * MSI-X is not supported but the controller still advertises the MSI-X
588	 * capability by default, which can lead to the Root Complex side
589	 * allocating MSI-X vectors which cannot be used. Avoid this by skipping
590	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
591	 * the next pointer from the MSI-X entry and set that in the MSI
592	 * capability entry (which is the previous entry). This way the MSI-X
593	 * entry is skipped (left out of the linked-list) and not advertised.
594	 */
595	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
596				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
597
598	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;
599
600	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
601					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
602					 ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;
603
604	cfg_msi |= cfg_msix_cp;
605
606	rockchip_pcie_write(rockchip, cfg_msi,
607			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
608
609	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
610			    PCIE_CLIENT_CONFIG);
611
612	return 0;
613err_epc_mem_exit:
614	pci_epc_mem_exit(epc);
615err_uninit_port:
616	rockchip_pcie_deinit_phys(rockchip);
617err_disable_clocks:
618	rockchip_pcie_disable_clocks(rockchip);
619	return err;
620}
621
622static struct platform_driver rockchip_pcie_ep_driver = {
623	.driver = {
624		.name = "rockchip-pcie-ep",
625		.of_match_table = rockchip_pcie_ep_of_match,
626	},
627	.probe = rockchip_pcie_ep_probe,
628};
629
630builtin_platform_driver(rockchip_pcie_ep_driver);
drivers/pci/controller/pcie-rockchip-ep.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Rockchip AXI PCIe endpoint controller driver
  4 *
  5 * Copyright (c) 2018 Rockchip, Inc.
  6 *
  7 * Author: Shawn Lin <shawn.lin@rock-chips.com>
  8 *         Simon Xue <xxm@rock-chips.com>
  9 */
 10
 11#include <linux/configfs.h>
 12#include <linux/delay.h>
 13#include <linux/gpio/consumer.h>
 14#include <linux/iopoll.h>
 15#include <linux/kernel.h>
 16#include <linux/irq.h>
 17#include <linux/of.h>
 18#include <linux/pci-epc.h>
 19#include <linux/platform_device.h>
 20#include <linux/pci-epf.h>
 21#include <linux/sizes.h>
 22#include <linux/workqueue.h>
 23
 24#include "pcie-rockchip.h"
 25
 26/**
 27 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 28 * @rockchip: Rockchip PCIe controller
 29 * @epc: PCI EPC device
 30 * @max_regions: maximum number of regions supported by hardware
 31 * @ob_region_map: bitmask of mapped outbound regions
 32 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 33 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 34 *		   dedicated outbound region is mapped.
 35 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 36 *		  the sending of a memory write (MSI) / normal message (INTX
 37 *		  IRQ) TLP through the PCIe bus.
 38 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 39 *		  dedicated outbound region.
 40 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 41 *		the MSI/INTX IRQ dedicated outbound region.
 42 * @irq_pending: bitmask of asserted INTX IRQs.
 43 * @perst_irq: IRQ used for the PERST# signal.
 44 * @perst_asserted: True if the PERST# signal was asserted.
 45 * @link_up: True if the PCI link is up.
 46 * @link_training: Work item to execute PCI link training.
 47 */
 48struct rockchip_pcie_ep {
 49	struct rockchip_pcie	rockchip;
 50	struct pci_epc		*epc;
 51	u32			max_regions;
 52	unsigned long		ob_region_map;
 53	phys_addr_t		*ob_addr;
 54	phys_addr_t		irq_phys_addr;
 55	void __iomem		*irq_cpu_addr;
 56	u64			irq_pci_addr;
 57	u8			irq_pci_fn;
 58	u8			irq_pending;
 59	int			perst_irq;
 60	bool			perst_asserted;
 61	bool			link_up;
 62	struct delayed_work	link_training;
 63};
 64
 65static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
 66					  u32 region)
 67{
 68	rockchip_pcie_write(rockchip, 0,
 69			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
 70	rockchip_pcie_write(rockchip, 0,
 71			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
 72	rockchip_pcie_write(rockchip, 0,
 73			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
 74	rockchip_pcie_write(rockchip, 0,
 75			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
 76}
 77
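/*
 * Number of low address bits the outbound ATU must pass through so that the
 * programmed window covers the whole [pci_addr, pci_addr + size) range,
 * clamped to the minimum/maximum supported by the hardware.
 */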
 78static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
 79					    u64 pci_addr, size_t size)
 80{
 81	int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
 82
 83	return clamp(num_pass_bits,
 84		     ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
 85		     ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
 86}
 87
 88static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
 89					 u32 r, u64 cpu_addr, u64 pci_addr,
 90					 size_t size)
 91{
 92	int num_pass_bits;
 93	u32 addr0, addr1, desc0;
 94
 95	num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
 96							 pci_addr, size);
 97
 98	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
 99		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
100	addr1 = upper_32_bits(pci_addr);
101	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;
102
103	/* PCI bus address region */
104	rockchip_pcie_write(rockchip, addr0,
105			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
106	rockchip_pcie_write(rockchip, addr1,
107			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
108	rockchip_pcie_write(rockchip, desc0,
109			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
110	rockchip_pcie_write(rockchip, 0,
111			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
112}
113
114static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
115					 struct pci_epf_header *hdr)
116{
117	u32 reg;
118	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
119	struct rockchip_pcie *rockchip = &ep->rockchip;
120
121	/* All functions share the same vendor ID with function 0 */
122	if (fn == 0) {
123		rockchip_pcie_write(rockchip,
124				    hdr->vendorid | hdr->subsys_vendor_id << 16,
125				    PCIE_CORE_CONFIG_VENDOR);
126	}
127
128	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
129	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
130	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
131
132	rockchip_pcie_write(rockchip,
133			    hdr->revid |
134			    hdr->progif_code << 8 |
135			    hdr->subclass_code << 16 |
136			    hdr->baseclass_code << 24,
137			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
138	rockchip_pcie_write(rockchip, hdr->cache_line_size,
139			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
140			    PCI_CACHE_LINE_SIZE);
141	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
142			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
143			    PCI_SUBSYSTEM_VENDOR_ID);
144	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
145			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
146			    PCI_INTERRUPT_LINE);
147
148	return 0;
149}
150
151static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
152				    struct pci_epf_bar *epf_bar)
153{
154	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
155	struct rockchip_pcie *rockchip = &ep->rockchip;
156	dma_addr_t bar_phys = epf_bar->phys_addr;
157	enum pci_barno bar = epf_bar->barno;
158	int flags = epf_bar->flags;
159	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
160	u64 sz;
161
162	/* BAR size is 2^(aperture + 7) */
163	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
164
165	/*
166	 * roundup_pow_of_two() returns an unsigned long, which is not suited
167	 * for 64bit values.
168	 */
169	sz = 1ULL << fls64(sz - 1);
170	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
171
172	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
173		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
174	} else {
175		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
176		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
177
178		if (is_64bits && (bar & 1))
179			return -EINVAL;
180
181		if (is_64bits && is_prefetch)
182			ctrl =
183			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
184		else if (is_prefetch)
185			ctrl =
186			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
187		else if (is_64bits)
188			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
189		else
190			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
191	}
192
193	if (bar < BAR_4) {
194		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
195		b = bar;
196	} else {
197		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
198		b = bar - BAR_4;
199	}
200
201	addr0 = lower_32_bits(bar_phys);
202	addr1 = upper_32_bits(bar_phys);
203
204	cfg = rockchip_pcie_read(rockchip, reg);
205	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
206		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
207	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
208		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
209
210	rockchip_pcie_write(rockchip, cfg, reg);
211	rockchip_pcie_write(rockchip, addr0,
212			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
213	rockchip_pcie_write(rockchip, addr1,
214			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
215
216	return 0;
217}
218
219static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
220				       struct pci_epf_bar *epf_bar)
221{
222	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
223	struct rockchip_pcie *rockchip = &ep->rockchip;
224	u32 reg, cfg, b, ctrl;
225	enum pci_barno bar = epf_bar->barno;
226
227	if (bar < BAR_4) {
228		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
229		b = bar;
230	} else {
231		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
232		b = bar - BAR_4;
233	}
234
235	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
236	cfg = rockchip_pcie_read(rockchip, reg);
237	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
238		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
239	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
240
241	rockchip_pcie_write(rockchip, cfg, reg);
242	rockchip_pcie_write(rockchip, 0x0,
243			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
244	rockchip_pcie_write(rockchip, 0x0,
245			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
246}
247
248static inline u32 rockchip_ob_region(phys_addr_t addr)
249{
250	return (addr >> ilog2(SZ_1M)) & 0x1f;
251}
252
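/*
 * Align a PCI address to an outbound ATU window: return the window-aligned
 * base address, and report the offset of pci_addr into that window and the
 * size to map from the window base (capped so it fits in the 1 MiB region
 * and rounded up to the ATU size granularity).
 */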
253static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
254				       size_t *pci_size, size_t *addr_offset)
255{
256	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
257	size_t size = *pci_size;
258	u64 offset, mask;
259	int num_bits;
260
261	num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
262						    pci_addr, size);
263	mask = (1ULL << num_bits) - 1;
264
265	offset = pci_addr & mask;
266	if (size + offset > SZ_1M)
267		size = SZ_1M - offset;
268
269	*pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
270	*addr_offset = offset;
271
272	return pci_addr & ~mask;
273}
274
275static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
276				     phys_addr_t addr, u64 pci_addr,
277				     size_t size)
278{
279	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
280	struct rockchip_pcie *pcie = &ep->rockchip;
281	u32 r = rockchip_ob_region(addr);
282
283	if (test_bit(r, &ep->ob_region_map))
284		return -EBUSY;
285
286	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
287
288	set_bit(r, &ep->ob_region_map);
289	ep->ob_addr[r] = addr;
290
291	return 0;
292}
293
294static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
295					phys_addr_t addr)
296{
297	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
298	struct rockchip_pcie *rockchip = &ep->rockchip;
299	u32 r = rockchip_ob_region(addr);
300
301	if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
302		return;
303
304	rockchip_pcie_clear_ep_ob_atu(rockchip, r);
305
306	ep->ob_addr[r] = 0;
307	clear_bit(r, &ep->ob_region_map);
308}
309
310static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
311				    u8 multi_msg_cap)
312{
313	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
314	struct rockchip_pcie *rockchip = &ep->rockchip;
315	u32 flags;
316
317	flags = rockchip_pcie_read(rockchip,
318				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
319				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
320	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
321	flags |=
322	   (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
323	   (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
324	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
325	rockchip_pcie_write(rockchip, flags,
326			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
327			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
328	return 0;
329}
330
331static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
332{
333	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
334	struct rockchip_pcie *rockchip = &ep->rockchip;
335	u32 flags;
336
337	flags = rockchip_pcie_read(rockchip,
338				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
339				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
340	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
341		return -EINVAL;
342
343	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
344			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
345}
346
347static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
348					 u8 intx, bool do_assert)
349{
350	struct rockchip_pcie *rockchip = &ep->rockchip;
351
352	intx &= 3;
353
354	if (do_assert) {
355		ep->irq_pending |= BIT(intx);
356		rockchip_pcie_write(rockchip,
357				    PCIE_CLIENT_INT_IN_ASSERT |
358				    PCIE_CLIENT_INT_PEND_ST_PEND,
359				    PCIE_CLIENT_LEGACY_INT_CTRL);
360	} else {
361		ep->irq_pending &= ~BIT(intx);
362		rockchip_pcie_write(rockchip,
363				    PCIE_CLIENT_INT_IN_DEASSERT |
364				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
365				    PCIE_CLIENT_LEGACY_INT_CTRL);
366	}
367}
368
369static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
370					  u8 intx)
371{
372	u16 cmd;
373
374	cmd = rockchip_pcie_read(&ep->rockchip,
375				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
376				 ROCKCHIP_PCIE_EP_CMD_STATUS);
377
378	if (cmd & PCI_COMMAND_INTX_DISABLE)
379		return -EINVAL;
380
381	/*
 382	 * The TRM vaguely states that a delay is needed between INTx toggles
 383	 * because it takes some AHB bus clock cycles to take effect, so add a
 384	 * generous 1 ms delay here.
385	 */
386	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
387	mdelay(1);
388	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
389	return 0;
390}
391
392static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
393					 u8 interrupt_num)
394{
395	struct rockchip_pcie *rockchip = &ep->rockchip;
396	u32 flags, mme, data, data_mask;
397	size_t irq_pci_size, offset;
398	u64 irq_pci_addr;
399	u8 msi_count;
400	u64 pci_addr;
401
402	/* Check MSI enable bit */
403	flags = rockchip_pcie_read(&ep->rockchip,
404				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
405				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
406	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
407		return -EINVAL;
408
409	/* Get MSI numbers from MME */
410	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
411			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
412	msi_count = 1 << mme;
413	if (!interrupt_num || interrupt_num > msi_count)
414		return -EINVAL;
415
416	/* Set MSI private data */
417	data_mask = msi_count - 1;
418	data = rockchip_pcie_read(rockchip,
419				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
420				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
421				  PCI_MSI_DATA_64);
422	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
423
424	/* Get MSI PCI address */
425	pci_addr = rockchip_pcie_read(rockchip,
426				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
427				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
428				      PCI_MSI_ADDRESS_HI);
429	pci_addr <<= 32;
430	pci_addr |= rockchip_pcie_read(rockchip,
431				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
432				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
433				       PCI_MSI_ADDRESS_LO);
434
435	/* Set the outbound region if needed. */
436	irq_pci_size = ~PCIE_ADDR_MASK + 1;
437	irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
438						   pci_addr & PCIE_ADDR_MASK,
439						   &irq_pci_size, &offset);
440	if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
441		     ep->irq_pci_fn != fn)) {
442		rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
443					rockchip_ob_region(ep->irq_phys_addr),
444					ep->irq_phys_addr,
445					irq_pci_addr, irq_pci_size);
446		ep->irq_pci_addr = irq_pci_addr;
447		ep->irq_pci_fn = fn;
448	}
449
450	writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
451	return 0;
452}
453
454static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
455				      unsigned int type, u16 interrupt_num)
456{
457	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
458
459	switch (type) {
460	case PCI_IRQ_INTX:
461		return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
462	case PCI_IRQ_MSI:
463		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
464	default:
465		return -EINVAL;
466	}
467}
468
469static int rockchip_pcie_ep_start(struct pci_epc *epc)
470{
471	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
472	struct rockchip_pcie *rockchip = &ep->rockchip;
473	struct pci_epf *epf;
474	u32 cfg;
475
476	cfg = BIT(0);
477	list_for_each_entry(epf, &epc->pci_epf, list)
478		cfg |= BIT(epf->func_no);
479
480	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
481
482	if (rockchip->perst_gpio)
483		enable_irq(ep->perst_irq);
484
485	/* Enable configuration and start link training */
486	rockchip_pcie_write(rockchip,
487			    PCIE_CLIENT_LINK_TRAIN_ENABLE |
488			    PCIE_CLIENT_CONF_ENABLE,
489			    PCIE_CLIENT_CONFIG);
490
491	if (!rockchip->perst_gpio)
492		schedule_delayed_work(&ep->link_training, 0);
493
494	return 0;
495}
496
497static void rockchip_pcie_ep_stop(struct pci_epc *epc)
498{
499	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
500	struct rockchip_pcie *rockchip = &ep->rockchip;
501
502	if (rockchip->perst_gpio) {
503		ep->perst_asserted = true;
504		disable_irq(ep->perst_irq);
505	}
506
507	cancel_delayed_work_sync(&ep->link_training);
508
509	/* Stop link training and disable configuration */
510	rockchip_pcie_write(rockchip,
511			    PCIE_CLIENT_CONF_DISABLE |
512			    PCIE_CLIENT_LINK_TRAIN_DISABLE,
513			    PCIE_CLIENT_CONFIG);
514}
515
516static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
517{
518	u32 status;
519
520	status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
521	status |= PCI_EXP_LNKCTL_RL;
522	rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
523}
524
525static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
526{
527	u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
528
529	return PCIE_LINK_UP(val);
530}
531
532static void rockchip_pcie_ep_link_training(struct work_struct *work)
533{
534	struct rockchip_pcie_ep *ep =
535		container_of(work, struct rockchip_pcie_ep, link_training.work);
536	struct rockchip_pcie *rockchip = &ep->rockchip;
537	struct device *dev = rockchip->dev;
538	u32 val;
539	int ret;
540
541	/* Enable Gen1 training and wait for its completion */
542	ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
543				 val, PCIE_LINK_TRAINING_DONE(val), 50,
544				 LINK_TRAIN_TIMEOUT);
545	if (ret)
546		goto again;
547
548	/* Make sure that the link is up */
549	ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
550				 val, PCIE_LINK_UP(val), 50,
551				 LINK_TRAIN_TIMEOUT);
552	if (ret)
553		goto again;
554
555	/*
556	 * Check the current speed: if gen2 speed was requested and we are not
557	 * at gen2 speed yet, retrain again for gen2.
558	 */
559	val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
560	if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
561		/* Enable retrain for gen2 */
562		rockchip_pcie_ep_retrain_link(rockchip);
563		readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
564				   val, PCIE_LINK_IS_GEN2(val), 50,
565				   LINK_TRAIN_TIMEOUT);
566	}
567
568	/* Check again that the link is up */
569	if (!rockchip_pcie_ep_link_up(rockchip))
570		goto again;
571
572	/*
573	 * If PERST# was asserted while polling the link, do not notify
574	 * the function.
575	 */
576	if (ep->perst_asserted)
577		return;
578
579	val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
580	dev_info(dev,
581		 "link up (negotiated speed: %sGT/s, width: x%lu)\n",
582		 (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
583		 ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
584		  PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);
585
586	/* Notify the function */
587	pci_epc_linkup(ep->epc);
588	ep->link_up = true;
589
590	return;
591
592again:
593	schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
594}
595
596static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
597{
598	struct rockchip_pcie *rockchip = &ep->rockchip;
599
600	dev_dbg(rockchip->dev, "PERST# asserted, link down\n");
601
602	if (ep->perst_asserted)
603		return;
604
605	ep->perst_asserted = true;
606
607	cancel_delayed_work_sync(&ep->link_training);
608
609	if (ep->link_up) {
610		pci_epc_linkdown(ep->epc);
611		ep->link_up = false;
612	}
613}
614
615static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
616{
617	struct rockchip_pcie *rockchip = &ep->rockchip;
618
619	dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");
620
621	if (!ep->perst_asserted)
622		return;
623
624	ep->perst_asserted = false;
625
626	/* Enable link re-training */
627	rockchip_pcie_ep_retrain_link(rockchip);
628
629	/* Start link training */
630	schedule_delayed_work(&ep->link_training, 0);
631}
632
633static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
634{
635	struct pci_epc *epc = data;
636	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
637	struct rockchip_pcie *rockchip = &ep->rockchip;
638	u32 perst = gpiod_get_value(rockchip->perst_gpio);
639
640	if (perst)
641		rockchip_pcie_ep_perst_assert(ep);
642	else
643		rockchip_pcie_ep_perst_deassert(ep);
644
645	irq_set_irq_type(ep->perst_irq,
646			 (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
647
648	return IRQ_HANDLED;
649}
650
651static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
652{
653	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
654	struct rockchip_pcie *rockchip = &ep->rockchip;
655	struct device *dev = rockchip->dev;
656	int ret;
657
658	if (!rockchip->perst_gpio)
659		return 0;
660
661	/* PCIe reset interrupt */
662	ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
663	if (ep->perst_irq < 0) {
664		dev_err(dev,
665			"failed to get IRQ for PERST# GPIO: %d\n",
666			ep->perst_irq);
667
668		return ep->perst_irq;
669	}
670
671	/*
672	 * The perst_gpio is active low, so when it is inactive on start, it
673	 * is high and will trigger the perst_irq handler. So treat this initial
674	 * IRQ as a dummy one by faking the host asserting PERST#.
675	 */
676	ep->perst_asserted = true;
677	irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
678	ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
679					rockchip_pcie_ep_perst_irq_thread,
680					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
681					"pcie-ep-perst", epc);
682	if (ret) {
683		dev_err(dev,
684			"failed to request IRQ for PERST# GPIO: %d\n",
685			ret);
686
687		return ret;
688	}
689
690	return 0;
691}
692
693static const struct pci_epc_features rockchip_pcie_epc_features = {
694	.linkup_notifier = true,
695	.msi_capable = true,
696	.msix_capable = false,
697	.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
698};
699
700static const struct pci_epc_features*
701rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
702{
703	return &rockchip_pcie_epc_features;
704}
705
706static const struct pci_epc_ops rockchip_pcie_epc_ops = {
707	.write_header	= rockchip_pcie_ep_write_header,
708	.set_bar	= rockchip_pcie_ep_set_bar,
709	.clear_bar	= rockchip_pcie_ep_clear_bar,
710	.align_addr	= rockchip_pcie_ep_align_addr,
711	.map_addr	= rockchip_pcie_ep_map_addr,
712	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
713	.set_msi	= rockchip_pcie_ep_set_msi,
714	.get_msi	= rockchip_pcie_ep_get_msi,
715	.raise_irq	= rockchip_pcie_ep_raise_irq,
716	.start		= rockchip_pcie_ep_start,
717	.stop		= rockchip_pcie_ep_stop,
718	.get_features	= rockchip_pcie_ep_get_features,
719};
720
721static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
722					  struct rockchip_pcie_ep *ep)
723{
724	struct device *dev = rockchip->dev;
725	int err;
726
727	err = rockchip_pcie_parse_dt(rockchip);
728	if (err)
729		return err;
730
731	err = rockchip_pcie_get_phys(rockchip);
732	if (err)
733		return err;
734
735	err = of_property_read_u32(dev->of_node,
736				   "rockchip,max-outbound-regions",
737				   &ep->max_regions);
738	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
739		ep->max_regions = MAX_REGION_LIMIT;
740
741	ep->ob_region_map = 0;
742
743	err = of_property_read_u8(dev->of_node, "max-functions",
744				  &ep->epc->max_functions);
745	if (err < 0)
746		ep->epc->max_functions = 1;
747
748	return 0;
749}
750
751static const struct of_device_id rockchip_pcie_ep_of_match[] = {
752	{ .compatible = "rockchip,rk3399-pcie-ep"},
753	{},
754};
755
756static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
757{
758	struct rockchip_pcie *rockchip = &ep->rockchip;
759	struct device *dev = rockchip->dev;
760	struct pci_epc_mem_window *windows = NULL;
761	int err, i;
762
763	ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
764				   GFP_KERNEL);
765
766	if (!ep->ob_addr)
767		return -ENOMEM;
768
769	windows = devm_kcalloc(dev, ep->max_regions,
770			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
771	if (!windows)
772		return -ENOMEM;
773
774	for (i = 0; i < ep->max_regions; i++) {
775		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
776		windows[i].size = SZ_1M;
777		windows[i].page_size = SZ_1M;
778	}
779	err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
780	devm_kfree(dev, windows);
781
782	if (err < 0) {
783		dev_err(dev, "failed to initialize the memory space\n");
784		return err;
785	}
786
787	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
788						  SZ_1M);
789	if (!ep->irq_cpu_addr) {
790		dev_err(dev, "failed to reserve memory space for MSI\n");
791		err = -ENOMEM;
792		goto err_epc_mem_exit;
793	}
794
795	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
796
797	return 0;
798
799err_epc_mem_exit:
800	pci_epc_mem_exit(ep->epc);
801
802	return err;
803}
804
805static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
806{
807	pci_epc_mem_exit(ep->epc);
808}
809
810static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
811{
812	u32 cfg_msi, cfg_msix_cp;
813
814	/*
815	 * MSI-X is not supported but the controller still advertises the MSI-X
816	 * capability by default, which can lead to the Root Complex side
817	 * allocating MSI-X vectors which cannot be used. Avoid this by skipping
818	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
819	 * the next pointer from the MSI-X entry and set that in the MSI
820	 * capability entry (which is the previous entry). This way the MSI-X
821	 * entry is skipped (left out of the linked-list) and not advertised.
822	 */
823	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
824				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
825
826	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;
827
828	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
829					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
830					 ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;
831
832	cfg_msi |= cfg_msix_cp;
833
834	rockchip_pcie_write(rockchip, cfg_msi,
835			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
836}
837
838static int rockchip_pcie_ep_probe(struct platform_device *pdev)
839{
840	struct device *dev = &pdev->dev;
841	struct rockchip_pcie_ep *ep;
842	struct rockchip_pcie *rockchip;
843	struct pci_epc *epc;
844	int err;
845
846	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
847	if (!ep)
848		return -ENOMEM;
849
850	rockchip = &ep->rockchip;
851	rockchip->is_rc = false;
852	rockchip->dev = dev;
853	INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);
854
855	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
856	if (IS_ERR(epc)) {
857		dev_err(dev, "failed to create EPC device\n");
858		return PTR_ERR(epc);
859	}
860
861	ep->epc = epc;
862	epc_set_drvdata(epc, ep);
863
864	err = rockchip_pcie_ep_get_resources(rockchip, ep);
865	if (err)
866		return err;
867
868	err = rockchip_pcie_ep_init_ob_mem(ep);
869	if (err)
870		return err;
871
872	err = rockchip_pcie_enable_clocks(rockchip);
873	if (err)
874		goto err_exit_ob_mem;
875
876	err = rockchip_pcie_init_port(rockchip);
877	if (err)
878		goto err_disable_clocks;
879
880	rockchip_pcie_ep_hide_broken_msix_cap(rockchip);
881
882	/* Only enable function 0 by default */
883	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
884
885	pci_epc_init_notify(epc);
886
887	err = rockchip_pcie_ep_setup_irq(epc);
888	if (err < 0)
889		goto err_uninit_port;
890
891	return 0;
892err_uninit_port:
893	rockchip_pcie_deinit_phys(rockchip);
894err_disable_clocks:
895	rockchip_pcie_disable_clocks(rockchip);
896err_exit_ob_mem:
897	rockchip_pcie_ep_exit_ob_mem(ep);
898	return err;
899}
900
901static struct platform_driver rockchip_pcie_ep_driver = {
902	.driver = {
903		.name = "rockchip-pcie-ep",
904		.of_match_table = rockchip_pcie_ep_of_match,
905	},
906	.probe = rockchip_pcie_ep_probe,
907};
908
909builtin_platform_driver(rockchip_pcie_ep_driver);