// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
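	/* Example: a 1 MiB BAR rounds up to sz = SZ_1M, so aperture = 20 - 7 = 13. */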

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

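		/*
		 * A 64-bit BAR consumes two BAR slots: the next (odd) BAR
		 * holds the upper 32 bits of the address, so a 64-bit BAR
		 * must start on an even BAR number.
		 */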
		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

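	/* CFG0 describes BARs 0-3, CFG1 describes BARs 4-5. */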
	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

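	/*
	 * Bit 0 of ob_region_map is set in cdns_pcie_ep_setup(), so the
	 * search naturally skips the region reserved for IRQ writes.
	 */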
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr,
				      size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
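	/* MMC occupies bits 3:1 of Message Control, hence the shift by 1. */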
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
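	/* MME encodes log2 of the enabled vector count: 0 -> 1 vector, ..., 5 -> 32. */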

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

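	/* The MSI-X Table Size field is encoded as N - 1 entries. */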
	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
				 enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

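	/* Only INTA..INTD exist, so fold the INTx index into the 0..3 range. */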
	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

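	/*
	 * Keep the Interrupt Status bit in sync with irq_pending: toggle it
	 * only when its current value disagrees with whether any INTx is
	 * still pending.
	 */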
	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 * in drivers/pci/dwc/pci-dra7xx.c
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
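	/*
	 * The host programs the base Message Data value; the function ORs
	 * the vector index into its low log2(msi_count) bits.
	 */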
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
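	/* Bits 1:0 of the Message Address are reserved; MSI writes are DWORD aligned. */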
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	epf = &ep->epf[fn];
	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	struct pci_epf *epf;
	u32 cfg;
	int ret;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &cdns_pcie_epc_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct resource *res;
	struct pci_epc *epc;
	int ret;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

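	/*
	 * Hand the whole "mem" window over to the EPC core;
	 * pci_epc_mem_alloc_addr() carves PAGE_SIZE-aligned chunks out of it.
	 */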
	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);
	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}