// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe endpoint driver for Renesas R-Car SoCs
 * Copyright (c) 2020 Renesas Electronics Europe GmbH
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "pcie-rcar.h"

#define RCAR_EPC_MAX_FUNCTIONS	1

/* Structure representing the PCIe interface */
struct rcar_pcie_endpoint {
	struct rcar_pcie pcie;
	phys_addr_t *ob_mapped_addr;
	struct pci_epc_mem_window *ob_window;
	u8 max_functions;
	unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS];
	unsigned long *ib_window_map;
	u32 num_ib_windows;
	u32 num_ob_windows;
};

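/*
 * Basic controller setup for endpoint operation: select endpoint mode,
 * program the PCIe capability as an endpoint function, restrict MPS/MRRS
 * to 128 bytes, select 5.0 GT/s as the target link speed and terminate
 * the capability list.
 */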
static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
{
	u32 val;

	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set endpoint mode */
	rcar_pci_write_reg(pcie, 0, PCIEMSR);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
		   PCI_HEADER_TYPE_NORMAL);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	val = rcar_pci_read_reg(pcie, EXPCAP(1));
	/* device supports fixed 128 bytes MPSS */
	val &= ~GENMASK(2, 0);
	rcar_pci_write_reg(pcie, val, EXPCAP(1));

	val = rcar_pci_read_reg(pcie, EXPCAP(2));
	/* read requests size 128 bytes */
	val &= ~GENMASK(14, 12);
	/* payload size 128 bytes */
	val &= ~GENMASK(7, 5);
	rcar_pci_write_reg(pcie, val, EXPCAP(2));

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* flush modifications */
	wmb();
}

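/* Return the index of the outbound window whose CPU base matches @addr. */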
static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
				   phys_addr_t addr)
{
	int i;

	for (i = 0; i < ep->num_ob_windows; i++)
		if (ep->ob_window[i].phys_base == addr)
			return i;

	return -EINVAL;
}

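/*
 * Collect the "memory%u" platform resources as outbound windows. Each
 * window backs a single mapping, so its page_size is set to the full
 * window size.
 */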
static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
					   struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	char outbound_name[10];
	struct resource *res;
	unsigned int i = 0;

	ep->num_ob_windows = 0;
	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
		sprintf(outbound_name, "memory%u", i);
		res = platform_get_resource_byname(pdev,
						   IORESOURCE_MEM,
						   outbound_name);
		if (!res) {
			dev_err(pcie->dev, "missing outbound window %u\n", i);
			return -EINVAL;
		}
		if (!devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res),
					     outbound_name)) {
			dev_err(pcie->dev, "Cannot request memory region %s.\n",
				outbound_name);
			return -EIO;
		}

		ep->ob_window[i].phys_base = res->start;
		ep->ob_window[i].size = resource_size(res);
		/* controller doesn't support multiple allocation
		 * from same window, so set page_size to window size
		 */
		ep->ob_window[i].page_size = resource_size(res);
	}
	ep->num_ob_windows = i;

	return 0;
}

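/*
 * Map the controller registers, allocate the outbound window table and
 * read the optional "max-functions" DT property (capped at
 * RCAR_EPC_MAX_FUNCTIONS).
 */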
static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
				  struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	struct pci_epc_mem_window *window;
	struct device *dev = pcie->dev;
	struct resource res;
	int err;

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;
	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
				     sizeof(*window), GFP_KERNEL);
	if (!ep->ob_window)
		return -ENOMEM;

	rcar_pcie_parse_outbound_ranges(ep, pdev);

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->max_functions);
	if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
		ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;

	return 0;
}

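/*
 * Program the standard config-space header for function @fn: vendor/device
 * ID, revision and class code, subsystem IDs and the interrupt pin
 * (INTA only).
 */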
static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	if (!fn)
		val = hdr->vendorid;
	else
		val = rcar_pci_read_reg(pcie, IDSETR0);
	val |= hdr->deviceid << 16;
	rcar_pci_write_reg(pcie, val, IDSETR0);

	val = hdr->revid;
	val |= hdr->progif_code << 8;
	val |= hdr->subclass_code << 16;
	val |= hdr->baseclass_code << 24;
	rcar_pci_write_reg(pcie, val, IDSETR1);

	if (!fn)
		val = hdr->subsys_vendor_id;
	else
		val = rcar_pci_read_reg(pcie, SUBIDSETR);
	val |= hdr->subsys_id << 16;
	rcar_pci_write_reg(pcie, val, SUBIDSETR);

	if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
		return -EINVAL;
	val = rcar_pci_read_reg(pcie, PCICONF(15));
	val |= (hdr->interrupt_pin << 8);
	rcar_pci_write_reg(pcie, val, PCICONF(15));

	return 0;
}

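/*
 * Set up a BAR by claiming a pair of inbound windows (BARs are always
 * 64-bit on this controller) and programming the inbound translation with
 * a mask derived from the BAR size and CPU address alignment.
 */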
static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				struct pci_epf_bar *epf_bar)
{
	int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	u64 size = 1ULL << fls64(epf_bar->size - 1);
	dma_addr_t cpu_addr = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	struct rcar_pcie *pcie = &ep->pcie;
	u32 mask;
	int idx;
	int err;

	idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (idx >= ep->num_ib_windows) {
		dev_err(pcie->dev, "no free inbound window\n");
		return -EINVAL;
	}

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		flags |= IO_SPACE;

	ep->bar_to_atu[bar] = idx;
	/* use 64-bit BARs */
	set_bit(idx, ep->ib_window_map);
	set_bit(idx + 1, ep->ib_window_map);

	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(size, alignment);
	}

	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	rcar_pcie_set_inbound(pcie, cpu_addr,
			      0x0, mask | flags, idx, false);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err) {
		dev_err(pcie->dev, "phy not ready\n");
		return -EINVAL;
	}

	return 0;
}

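/* Disable the inbound translation behind @epf_bar and release its windows. */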
static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);

	clear_bit(atu_index, ep->ib_window_map);
	clear_bit(atu_index + 1, ep->ib_window_map);
}

static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				u8 interrupts)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
	rcar_pci_write_reg(pcie, flags, MSICAP(fn));

	return 0;
}

static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(flags & MSICAP0_MSIE))
		return -EINVAL;

	return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}

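/*
 * Map @pci_addr through the outbound window whose CPU base is @addr; the
 * window is looked up by exact base address since each window backs a
 * single mapping.
 */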
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	struct resource_entry win;
	struct resource res;
	int window;
	int err;

	/* check if we have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err) {
		dev_err(pcie->dev, "link not up\n");
		return err;
	}

	window = rcar_pcie_ep_get_window(ep, addr);
	if (window < 0) {
		dev_err(pcie->dev, "failed to get corresponding window\n");
		return -EINVAL;
	}

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	res.start = pci_addr;
	res.end = pci_addr + size - 1;
	res.flags = IORESOURCE_MEM;
	win.res = &res;

	rcar_pcie_set_outbound(pcie, window, &win);

	ep->ob_mapped_addr[window] = addr;

	return 0;
}

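/* Tear down the outbound mapping previously programmed for @addr. */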
static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct resource_entry win;
	struct resource res;
	int idx;

	for (idx = 0; idx < ep->num_ob_windows; idx++)
		if (ep->ob_mapped_addr[idx] == addr)
			break;

	if (idx >= ep->num_ob_windows)
		return;

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	win.res = &res;
	rcar_pcie_set_outbound(&ep->pcie, idx, &win);

	ep->ob_mapped_addr[idx] = 0;
}

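/*
 * Assert INTx by pulsing the ASTINTX bit for about 1 ms. Refused when MSI
 * is enabled, when INTx message transmission is disabled or when INTx is
 * already asserted.
 */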
static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
				    u8 fn, u8 intx)
{
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	if ((val & PCI_MSI_FLAGS_ENABLE)) {
		dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCICONF(1));
	if ((val & INTDIS)) {
		dev_err(pcie->dev, "INTx message transmission is disabled\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	if ((val & ASTINTX)) {
		dev_err(pcie->dev, "INTx is already asserted\n");
		return -EINVAL;
	}

	val |= ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);
	usleep_range(1000, 1001);
	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	val &= ~ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);

	return 0;
}

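/*
 * Send MSI vector @interrupt_num, provided MSI is enabled and the vector
 * is within the range advertised by the Multiple Message Enable field.
 */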
static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
				   u8 fn, u8 interrupt_num)
{
	u16 msi_count;
	u32 val;

	/* Check MSI enable bit */
	val = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(val & MSICAP0_MSIE))
		return -EINVAL;

	/* Get MSI numbers from MME */
	msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
	msi_count = 1 << msi_count;

	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);

	return 0;
}

static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_IRQ_INTX:
		return rcar_pcie_ep_assert_intx(ep, fn, 0);

	case PCI_IRQ_MSI:
		return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);

	default:
		return -EINVAL;
	}
}

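/* Initialize MACCTLR and kick off configuration by writing CFINIT. */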
static int rcar_pcie_ep_start(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);

	return 0;
}

static void rcar_pcie_ep_stop(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
}

static const struct pci_epc_features rcar_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
	.reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
	.bar_fixed_size[0] = 128,
	.bar_fixed_size[2] = 256,
	.bar_fixed_size[4] = 256,
};

static const struct pci_epc_features*
rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rcar_pcie_epc_features;
}

static const struct pci_epc_ops rcar_pcie_epc_ops = {
	.write_header = rcar_pcie_ep_write_header,
	.set_bar = rcar_pcie_ep_set_bar,
	.clear_bar = rcar_pcie_ep_clear_bar,
	.set_msi = rcar_pcie_ep_set_msi,
	.get_msi = rcar_pcie_ep_get_msi,
	.map_addr = rcar_pcie_ep_map_addr,
	.unmap_addr = rcar_pcie_ep_unmap_addr,
	.raise_irq = rcar_pcie_ep_raise_irq,
	.start = rcar_pcie_ep_start,
	.stop = rcar_pcie_ep_stop,
	.get_features = rcar_pcie_ep_get_features,
};

static const struct of_device_id rcar_pcie_ep_of_match[] = {
	{ .compatible = "renesas,r8a774c0-pcie-ep", },
	{ .compatible = "renesas,rcar-gen3-pcie-ep" },
	{ },
};

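/*
 * Probe: power up the controller via runtime PM, gather DT resources,
 * allocate the window bookkeeping, create the EPC device, initialize the
 * hardware and register the outbound memory windows with the EPC core.
 */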
static int rcar_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_endpoint *ep;
	struct rcar_pcie *pcie;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->dev = dev;

	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_ep_get_pdata(ep, pdev);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
	ep->ib_window_map =
		devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
			     sizeof(long), GFP_KERNEL);
	if (!ep->ib_window_map) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for inbound map\n");
		goto err_pm_put;
	}

	ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
					  sizeof(*ep->ob_mapped_addr),
					  GFP_KERNEL);
	if (!ep->ob_mapped_addr) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
		goto err_pm_put;
	}

	epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		err = PTR_ERR(epc);
		goto err_pm_put;
	}

	epc->max_functions = ep->max_functions;
	epc_set_drvdata(epc, ep);

	rcar_pcie_ep_hw_init(pcie);

	err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
	if (err < 0) {
		dev_err(dev, "failed to initialize the epc memory space\n");
		goto err_pm_put;
	}

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);

	return err;
}

static struct platform_driver rcar_pcie_ep_driver = {
	.driver = {
		.name = "rcar-pcie-ep",
		.of_match_table = rcar_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_ep_probe,
};
builtin_platform_driver(rcar_pcie_ep_driver);