1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Synopsys DesignWare PCIe host controller driver
4 *
5 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6 * https://www.samsung.com
7 *
8 * Author: Jingoo Han <jg1.han@samsung.com>
9 */
10
11#include <linux/delay.h>
12#include <linux/of.h>
13#include <linux/types.h>
14
15#include "../../pci.h"
16#include "pcie-designware.h"
17
18/*
19 * These interfaces resemble the pci_find_*capability() interfaces, but these
20 * are for configuring host controllers, which are bridges *to* PCI devices but
21 * are not PCI devices themselves.
22 */
/*
 * Walk the standard config-space capability list starting at @cap_ptr
 * and return the offset of the first entry whose ID matches @cap, or 0
 * if the list ends (or a bogus capability ID is read) first.
 *
 * Recurses once per list entry; termination relies on the hardware
 * presenting a zero-terminated (or invalid-ID-terminated) list.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	/* A zero pointer marks the end of the capability list */
	if (!cap_ptr)
		return 0;

	/* Each entry: byte 0 = capability ID, byte 1 = next pointer */
	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	/* Bail out on an ID beyond the architected maximum */
	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
44
/*
 * Find a standard capability (PCI_CAP_ID_*) in the host bridge's own
 * config space, accessed through the DBI interface.  Returns the offset
 * of the capability structure, or 0 if it is not present.
 */
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	/* PCI_CAPABILITY_LIST holds the offset of the first entry */
	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
56
/*
 * Walk the extended (>= 0x100) capability list and return the offset of
 * the first capability with ID @cap found after @start, or 0 if none is
 * found.  @start == 0 begins the walk at the start of extended config
 * space.
 */
static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* Resume at @start when given, else begin at 0x100 */
	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	/* ttl bounds the walk so a looping list cannot hang us */
	while (ttl-- > 0) {
		/* pos != start skips the entry we were told to resume after */
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		/* A next pointer below 0x100 terminates the list */
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}
91
/*
 * Find the first instance of extended capability @cap in the bridge's
 * config space.  Returns its offset, or 0 if not present.
 */
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
97
98int dw_pcie_read(void __iomem *addr, int size, u32 *val)
99{
100 if (!IS_ALIGNED((uintptr_t)addr, size)) {
101 *val = 0;
102 return PCIBIOS_BAD_REGISTER_NUMBER;
103 }
104
105 if (size == 4) {
106 *val = readl(addr);
107 } else if (size == 2) {
108 *val = readw(addr);
109 } else if (size == 1) {
110 *val = readb(addr);
111 } else {
112 *val = 0;
113 return PCIBIOS_BAD_REGISTER_NUMBER;
114 }
115
116 return PCIBIOS_SUCCESSFUL;
117}
118EXPORT_SYMBOL_GPL(dw_pcie_read);
119
120int dw_pcie_write(void __iomem *addr, int size, u32 val)
121{
122 if (!IS_ALIGNED((uintptr_t)addr, size))
123 return PCIBIOS_BAD_REGISTER_NUMBER;
124
125 if (size == 4)
126 writel(val, addr);
127 else if (size == 2)
128 writew(val, addr);
129 else if (size == 1)
130 writeb(val, addr);
131 else
132 return PCIBIOS_BAD_REGISTER_NUMBER;
133
134 return PCIBIOS_SUCCESSFUL;
135}
136EXPORT_SYMBOL_GPL(dw_pcie_write);
137
/*
 * Read @size bytes from the DBI register space at offset @reg.  A
 * platform-provided ->read_dbi op takes precedence over the generic
 * MMIO accessor.  On access failure the error is logged and 0 is
 * returned (dw_pcie_read() zeroes @val on its error paths).
 */
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
153
/*
 * Write @val (@size bytes) to the DBI register space at offset @reg.
 * A platform-provided ->write_dbi op takes precedence; failures of the
 * generic accessor are logged but not propagated.
 */
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
168
169u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
170{
171 int ret;
172 u32 val;
173
174 if (pci->ops->read_dbi2)
175 return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
176
177 ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
178 if (ret)
179 dev_err(pci->dev, "read DBI address failed\n");
180
181 return val;
182}
183
184void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
185{
186 int ret;
187
188 if (pci->ops->write_dbi2) {
189 pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
190 return;
191 }
192
193 ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
194 if (ret)
195 dev_err(pci->dev, "write DBI address failed\n");
196}
197
/*
 * Read @size bytes from the iATU (unroll) register space at offset
 * @reg.  NOTE(review): this deliberately reuses ->read_dbi (not a
 * dedicated ATU op) with atu_base — presumably because platforms
 * override a single accessor for all register spaces; confirm against
 * the platform drivers before adding a separate op.
 */
u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, size);

	ret = dw_pcie_read(pci->atu_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}
212
/*
 * Write @val (@size bytes) to the iATU (unroll) register space at
 * offset @reg.  Reuses ->write_dbi with atu_base (see the matching
 * note on dw_pcie_read_atu()).  Failures are logged but not
 * propagated.
 */
void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}
226
/* Read an outbound-region unroll register for iATU region @index */
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}
233
/* Write an outbound-region unroll register for iATU region @index */
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}
241
/*
 * Program outbound iATU region @index (unroll register layout) to
 * translate the CPU window [cpu_addr, cpu_addr + size - 1] to
 * @pci_addr with transaction type @type, then enable the region and
 * poll until the enable bit reads back set.
 *
 * The enable write (REGION_CTRL2) is intentionally last so the region
 * becomes active only once fully configured.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
280
/*
 * Program outbound iATU region @index to map the CPU window
 * [cpu_addr, cpu_addr + size - 1] to @pci_addr with transaction type
 * @type.  Applies the platform's ->cpu_addr_fixup first, dispatches to
 * the unroll variant when in use, otherwise programs the legacy
 * viewport registers and polls the enable bit.
 *
 * NOTE(review): in viewport mode only the low 32 bits of the limit are
 * written (PCIE_ATU_LIMIT); confirm window sizes stay within what this
 * register pair can express.
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	/* Select the region; all subsequent writes go through the viewport */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	/* Enable last, once the region is fully configured */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
323
/* Read an inbound-region unroll register for iATU region @index */
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}
330
/* Write an inbound-region unroll register for iATU region @index */
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}
338
339static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
340 int bar, u64 cpu_addr,
341 enum dw_pcie_as_type as_type)
342{
343 int type;
344 u32 retries, val;
345
346 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
347 lower_32_bits(cpu_addr));
348 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
349 upper_32_bits(cpu_addr));
350
351 switch (as_type) {
352 case DW_PCIE_AS_MEM:
353 type = PCIE_ATU_TYPE_MEM;
354 break;
355 case DW_PCIE_AS_IO:
356 type = PCIE_ATU_TYPE_IO;
357 break;
358 default:
359 return -EINVAL;
360 }
361
362 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
363 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
364 PCIE_ATU_ENABLE |
365 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
366
367 /*
368 * Make sure ATU enable takes effect before any subsequent config
369 * and I/O accesses.
370 */
371 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
372 val = dw_pcie_readl_ib_unroll(pci, index,
373 PCIE_ATU_UNR_REGION_CTRL2);
374 if (val & PCIE_ATU_ENABLE)
375 return 0;
376
377 mdelay(LINK_WAIT_IATU);
378 }
379 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
380
381 return -EBUSY;
382}
383
384int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
385 u64 cpu_addr, enum dw_pcie_as_type as_type)
386{
387 int type;
388 u32 retries, val;
389
390 if (pci->iatu_unroll_enabled)
391 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
392 cpu_addr, as_type);
393
394 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
395 index);
396 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
397 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
398
399 switch (as_type) {
400 case DW_PCIE_AS_MEM:
401 type = PCIE_ATU_TYPE_MEM;
402 break;
403 case DW_PCIE_AS_IO:
404 type = PCIE_ATU_TYPE_IO;
405 break;
406 default:
407 return -EINVAL;
408 }
409
410 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
411 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
412 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
413
414 /*
415 * Make sure ATU enable takes effect before any subsequent config
416 * and I/O accesses.
417 */
418 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
419 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
420 if (val & PCIE_ATU_ENABLE)
421 return 0;
422
423 mdelay(LINK_WAIT_IATU);
424 }
425 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
426
427 return -EBUSY;
428}
429
/*
 * Disable iATU region @index in the given direction by clearing the
 * enable bit via the viewport registers.  Unknown region types are
 * silently ignored.
 *
 * NOTE(review): this writes ~PCIE_ATU_ENABLE rather than
 * read-modify-write, so all other CR2 bits are set — apparently
 * intentional for a region being torn down; confirm against the iATU
 * register description.
 */
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}
449
450int dw_pcie_wait_for_link(struct dw_pcie *pci)
451{
452 int retries;
453
454 /* Check if the link is up or not */
455 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
456 if (dw_pcie_link_up(pci)) {
457 dev_info(pci->dev, "Link up\n");
458 return 0;
459 }
460 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
461 }
462
463 dev_info(pci->dev, "Phy link never came up\n");
464
465 return -ETIMEDOUT;
466}
467EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
468
/*
 * Report whether the link is up.  A platform ->link_up op takes
 * precedence; otherwise the link is considered up when the debug
 * register shows link-up and training is no longer in progress.
 * Note the raw readl() on dbi_base here, bypassing the dw_pcie_*_dbi
 * accessors.
 */
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
480
/*
 * Set the upconfigure-support bit in the multi-lane control register
 * (read-modify-write, other bits preserved).
 */
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
490
/*
 * Program the Target Link Speed field of PCI_EXP_LNKCTL2 for the
 * generation selected by @link_gen.  For a speed not handled below,
 * the field is instead set from the hardware's own PCI_EXP_LNKCAP
 * maximum and HASD is cleared.
 *
 * NOTE(review): @link_gen indexes pcie_link_speed[] without a bounds
 * check — callers must pass a valid generation; confirm at call sites.
 */
void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 reg, val;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	reg &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
		reg &= ~PCI_EXP_LNKCTL2_HASD;
		reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);
524
525void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
526{
527 u32 val;
528
529 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
530 val &= ~PORT_LOGIC_N_FTS_MASK;
531 val |= n_fts & PORT_LOGIC_N_FTS_MASK;
532 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
533}
534EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
535
536static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
537{
538 u32 val;
539
540 val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
541 if (val == 0xffffffff)
542 return 1;
543
544 return 0;
545}
546
/*
 * One-time core setup: select the iATU register layout, program the
 * lane count from the DT "num-lanes" property into both the port-link
 * control and link width/speed control registers, and optionally start
 * the CDM register-integrity check.
 */
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	/*
	 * version >= 0x480A implies the unrolled iATU layout; for an
	 * unset version, probe the viewport register instead.
	 */
	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");


	/* Without a DT lane count, leave the lane registers untouched */
	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret) {
		dev_dbg(pci->dev, "property num-lanes isn't found\n");
		return;
	}

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Optionally start the continuous CDM register-integrity check */
	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Synopsys DesignWare PCIe host controller driver
4 *
5 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com
7 *
8 * Author: Jingoo Han <jg1.han@samsung.com>
9 */
10
11#include <linux/delay.h>
12#include <linux/of.h>
13#include <linux/types.h>
14
15#include "pcie-designware.h"
16
17/*
18 * These interfaces resemble the pci_find_*capability() interfaces, but these
19 * are for configuring host controllers, which are bridges *to* PCI devices but
20 * are not PCI devices themselves.
21 */
/*
 * Recursively walk the standard capability list from @cap_ptr; return
 * the offset of the entry matching @cap, or 0 when the list ends or an
 * out-of-range capability ID is read.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	/* byte 0: capability ID, byte 1: offset of the next entry */
	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
43
/*
 * Find a standard capability (PCI_CAP_ID_*) in the bridge's own config
 * space via DBI.  Returns the capability offset, or 0 if not present.
 */
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
55
/*
 * Walk the extended capability list; return the offset of the first
 * capability with ID @cap after @start (0 starts at 0x100), or 0 if
 * none.  ttl bounds the walk against a looping list.
 */
static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		/* pos != start skips the entry the caller resumed after */
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}
90
/*
 * Find the first instance of extended capability @cap; returns its
 * offset, or 0 if not present.
 */
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
96
/*
 * Size-dispatched MMIO read.  Misaligned addresses and unsupported
 * sizes yield PCIBIOS_BAD_REGISTER_NUMBER and *val == 0.
 */
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);
118
/*
 * Size-dispatched MMIO write.  Misaligned addresses and unsupported
 * sizes yield PCIBIOS_BAD_REGISTER_NUMBER.
 */
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);
136
/*
 * Read @size bytes from the DBI space at @reg; a platform ->read_dbi
 * op takes precedence.  Failures are logged; dw_pcie_read() zeroes
 * @val on its error paths, so 0 is returned then.
 */
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
152
/*
 * Write @val (@size bytes) to the DBI space at @reg; a platform
 * ->write_dbi op takes precedence.  Failures are logged only.
 */
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
167
168u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
169{
170 int ret;
171 u32 val;
172
173 if (pci->ops->read_dbi2)
174 return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
175
176 ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
177 if (ret)
178 dev_err(pci->dev, "read DBI address failed\n");
179
180 return val;
181}
182
183void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
184{
185 int ret;
186
187 if (pci->ops->write_dbi2) {
188 pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
189 return;
190 }
191
192 ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
193 if (ret)
194 dev_err(pci->dev, "write DBI address failed\n");
195}
196
/*
 * Read @size bytes from the iATU (unroll) space at @reg.  Reuses the
 * platform's ->read_dbi op with atu_base — presumably a single
 * accessor covers all register spaces; confirm against platform
 * drivers before splitting.
 */
u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, size);

	ret = dw_pcie_read(pci->atu_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}
211
/*
 * Write @val (@size bytes) to the iATU (unroll) space at @reg.  Reuses
 * ->write_dbi with atu_base (see note on dw_pcie_read_atu()).
 * Failures are logged only.
 */
void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}
225
/* Read an outbound-region unroll register for iATU region @index */
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}
232
/* Write an outbound-region unroll register for iATU region @index */
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}
240
/*
 * Program outbound iATU region @index (unroll layout) to translate
 * [cpu_addr, cpu_addr + size - 1] to @pci_addr with transaction type
 * @type; the enable write is last, then the enable bit is polled.
 * Note only the low 32 bits of the limit are written here
 * (PCIE_ATU_UNR_LIMIT).
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
276
/*
 * Program outbound iATU region @index to map
 * [cpu_addr, cpu_addr + size - 1] to @pci_addr with type @type.
 * Applies ->cpu_addr_fixup, dispatches to the unroll variant when in
 * use, otherwise programs the legacy viewport registers and polls the
 * enable bit.
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	/* Select the region; subsequent writes go through the viewport */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	/* Enable last, once the region is fully configured */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
319
/* Read an inbound-region unroll register for iATU region @index */
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}
326
/* Write an inbound-region unroll register for iATU region @index */
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}
334
335static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
336 int bar, u64 cpu_addr,
337 enum dw_pcie_as_type as_type)
338{
339 int type;
340 u32 retries, val;
341
342 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
343 lower_32_bits(cpu_addr));
344 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
345 upper_32_bits(cpu_addr));
346
347 switch (as_type) {
348 case DW_PCIE_AS_MEM:
349 type = PCIE_ATU_TYPE_MEM;
350 break;
351 case DW_PCIE_AS_IO:
352 type = PCIE_ATU_TYPE_IO;
353 break;
354 default:
355 return -EINVAL;
356 }
357
358 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
359 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
360 PCIE_ATU_ENABLE |
361 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
362
363 /*
364 * Make sure ATU enable takes effect before any subsequent config
365 * and I/O accesses.
366 */
367 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
368 val = dw_pcie_readl_ib_unroll(pci, index,
369 PCIE_ATU_UNR_REGION_CTRL2);
370 if (val & PCIE_ATU_ENABLE)
371 return 0;
372
373 mdelay(LINK_WAIT_IATU);
374 }
375 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
376
377 return -EBUSY;
378}
379
380int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
381 u64 cpu_addr, enum dw_pcie_as_type as_type)
382{
383 int type;
384 u32 retries, val;
385
386 if (pci->iatu_unroll_enabled)
387 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
388 cpu_addr, as_type);
389
390 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
391 index);
392 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
393 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
394
395 switch (as_type) {
396 case DW_PCIE_AS_MEM:
397 type = PCIE_ATU_TYPE_MEM;
398 break;
399 case DW_PCIE_AS_IO:
400 type = PCIE_ATU_TYPE_IO;
401 break;
402 default:
403 return -EINVAL;
404 }
405
406 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
407 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
408 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
409
410 /*
411 * Make sure ATU enable takes effect before any subsequent config
412 * and I/O accesses.
413 */
414 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
415 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
416 if (val & PCIE_ATU_ENABLE)
417 return 0;
418
419 mdelay(LINK_WAIT_IATU);
420 }
421 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
422
423 return -EBUSY;
424}
425
/*
 * Disable iATU region @index in the given direction by clearing the
 * enable bit via the viewport registers.  Unknown region types are
 * silently ignored.  Writes ~PCIE_ATU_ENABLE (not read-modify-write);
 * see the iATU register description before changing this.
 */
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}
445
/*
 * Poll dw_pcie_link_up() until the link trains or the retry budget is
 * exhausted.  Returns 0 on link up, -ETIMEDOUT otherwise.
 */
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_info(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
464
/*
 * Report whether the link is up; a platform ->link_up op takes
 * precedence.  Otherwise: up == debug register shows link-up and
 * training no longer in progress.  Uses a raw readl() on dbi_base.
 */
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
476
/*
 * Detect the unrolled iATU layout: the legacy viewport register reads
 * back as all ones when absent.  Returns 1 for unroll, 0 for viewport.
 */
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
487
/*
 * One-time core setup: select the iATU register layout, program the
 * lane count from the DT "num-lanes" property into both the port-link
 * control and link width/speed control registers, and optionally start
 * the CDM register-integrity check.
 */
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	/*
	 * version >= 0x480A implies the unrolled iATU layout; for an
	 * unset version, probe the viewport register instead.
	 */
	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");


	/* Without a DT lane count, leave the lane registers untouched */
	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret) {
		dev_dbg(pci->dev, "property num-lanes isn't found\n");
		return;
	}

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Optionally start the continuous CDM register-integrity check */
	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}