// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/phy/phy.h>
#include <net/dcbnl.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define SPX5_ETYPE_TAG_C	0x8100
#define SPX5_ETYPE_TAG_S	0x88a8

#define SPX5_WAIT_US		1000
#define SPX5_WAIT_MAX_US	2000

enum port_error {
        SPX5_PERR_SPEED,
        SPX5_PERR_IFTYPE,
};

#define PAUSE_DISCARD	0xC
#define ETH_MAXLEN	(ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)

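/* Decode the SGMII in-band ability word received from the link partner */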
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
        status->an_complete = true;
        if (!(lp_abil & LPA_SGMII_LINK)) {
                status->link = false;
                return;
        }

        switch (lp_abil & LPA_SGMII_SPD_MASK) {
        case LPA_SGMII_10:
                status->speed = SPEED_10;
                break;
        case LPA_SGMII_100:
                status->speed = SPEED_100;
                break;
        case LPA_SGMII_1000:
                status->speed = SPEED_1000;
                break;
        default:
                status->link = false;
                return;
        }
        if (lp_abil & LPA_SGMII_FULL_DUPLEX)
                status->duplex = DUPLEX_FULL;
        else
                status->duplex = DUPLEX_HALF;
}

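/* Decode the Clause 37 (1000Base-X) ability words and resolve duplex and
 * pause from the local and link partner advertisements.
 */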
static void decode_cl37_word(u16 lp_abil, u16 ld_abil,
                             struct sparx5_port_status *status)
{
        status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
        status->an_complete = true;
        status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
                         DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */

        if ((ld_abil & ADVERTISE_1000XPAUSE) &&
            (lp_abil & ADVERTISE_1000XPAUSE)) {
                status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
        } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
                   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
                status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
                                 MLO_PAUSE_TX : 0;
                status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
                                 MLO_PAUSE_RX : 0;
        } else {
                status->pause = MLO_PAUSE_NONE;
        }
}

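/* Read link, speed, duplex and aneg status from the 1G/2.5G device PCS */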
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
                                    struct sparx5_port *port,
                                    struct sparx5_port_status *status)
{
        u32 portno = port->portno;
        u16 lp_adv, ld_adv;
        u32 value;

        /* Get PCS Link down sticky */
        value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
        status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
        if (status->link_down)	/* Clear the sticky */
                spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

        /* Get both current Link and Sync status */
        value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
        status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
                       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

        if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
                status->speed = SPEED_1000;
        else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
                status->speed = SPEED_2500;

        status->duplex = DUPLEX_FULL;

        /* Get PCS ANEG status register */
        value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

        /* Aneg complete provides more information */
        if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
                lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
                if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
                        decode_sgmii_word(lp_adv, status);
                } else {
                        value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
                        ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
                        decode_cl37_word(lp_adv, ld_adv, status);
                }
        }
        return 0;
}

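/* Derive the link status of a 5G/10G/25G (SFI) port from the MAC TX monitor
 * idle-state sticky bit.
 */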
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
                                 struct sparx5_port *port,
                                 struct sparx5_port_status *status)
{
        bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
        u32 portno = port->portno;
        u32 value, dev, tinst;
        void __iomem *inst;

        if (!high_speed_dev) {
                netdev_err(port->ndev, "error: low speed and SFI mode\n");
                return -EINVAL;
        }

        dev = sparx5_to_high_dev(portno);
        tinst = sparx5_port_dev_index(portno);
        inst = spx5_inst_get(sparx5, dev, tinst);

        value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
        if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
                /* The link is or has been down. Clear the sticky bit */
                status->link_down = 1;
                spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
                value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
        }
        status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
        status->duplex = DUPLEX_FULL;
        if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
                status->speed = SPEED_5000;
        else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
                status->speed = SPEED_10000;
        else
                status->speed = SPEED_25000;

        return 0;
}

/* Get link status of 1000Base-X/in-band and SFI ports */
int sparx5_get_port_status(struct sparx5 *sparx5,
                           struct sparx5_port *port,
                           struct sparx5_port_status *status)
{
        memset(status, 0, sizeof(*status));
        status->speed = port->conf.speed;
        if (port->conf.power_down) {
                status->link = false;
                return 0;
        }
        switch (port->conf.portmode) {
        case PHY_INTERFACE_MODE_SGMII:
        case PHY_INTERFACE_MODE_QSGMII:
        case PHY_INTERFACE_MODE_1000BASEX:
        case PHY_INTERFACE_MODE_2500BASEX:
                return sparx5_get_dev2g5_status(sparx5, port, status);
        case PHY_INTERFACE_MODE_5GBASER:
        case PHY_INTERFACE_MODE_10GBASER:
        case PHY_INTERFACE_MODE_25GBASER:
                return sparx5_get_sfi_status(sparx5, port, status);
        case PHY_INTERFACE_MODE_NA:
                return 0;
        default:
                netdev_err(port->ndev, "Status not supported\n");
                return -ENODEV;
        }
        return 0;
}

static int sparx5_port_error(struct sparx5_port *port,
                             struct sparx5_port_config *conf,
                             enum port_error errtype)
{
        switch (errtype) {
        case SPX5_PERR_SPEED:
                netdev_err(port->ndev,
                           "Interface does not support speed: %u: for %s\n",
                           conf->speed, phy_modes(conf->portmode));
                break;
        case SPX5_PERR_IFTYPE:
                netdev_err(port->ndev,
                           "Switch port does not support interface type: %s\n",
                           phy_modes(conf->portmode));
                break;
        default:
                netdev_err(port->ndev,
                           "Interface configuration error\n");
        }

        return -EINVAL;
}

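/* Verify that the requested speed is supported by the port and the selected
 * interface mode.
 */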
static int sparx5_port_verify_speed(struct sparx5 *sparx5,
                                    struct sparx5_port *port,
                                    struct sparx5_port_config *conf)
{
        if ((sparx5_port_is_2g5(port->portno) &&
             conf->speed > SPEED_2500) ||
            (sparx5_port_is_5g(port->portno) &&
             conf->speed > SPEED_5000) ||
            (sparx5_port_is_10g(port->portno) &&
             conf->speed > SPEED_10000))
                return sparx5_port_error(port, conf, SPX5_PERR_SPEED);

        switch (conf->portmode) {
        case PHY_INTERFACE_MODE_NA:
                return -EINVAL;
        case PHY_INTERFACE_MODE_1000BASEX:
                if (conf->speed != SPEED_1000 ||
                    sparx5_port_is_2g5(port->portno))
                        return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
                if (sparx5_port_is_2g5(port->portno))
                        return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
                break;
        case PHY_INTERFACE_MODE_2500BASEX:
                if (conf->speed != SPEED_2500 ||
                    sparx5_port_is_2g5(port->portno))
                        return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
                break;
        case PHY_INTERFACE_MODE_QSGMII:
                if (port->portno > 47)
                        return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
                fallthrough;
        case PHY_INTERFACE_MODE_SGMII:
                if (conf->speed != SPEED_1000 &&
                    conf->speed != SPEED_100 &&
                    conf->speed != SPEED_10 &&
                    conf->speed != SPEED_2500)
                        return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
                break;
        case PHY_INTERFACE_MODE_5GBASER:
        case PHY_INTERFACE_MODE_10GBASER:
        case PHY_INTERFACE_MODE_25GBASER:
                if ((conf->speed != SPEED_5000 &&
                     conf->speed != SPEED_10000 &&
                     conf->speed != SPEED_25000))
                        return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
                break;
        default:
                return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
        }
        return 0;
}

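/* Return true if the new configuration moves the port between the 1G/2.5G
 * device and the 5G/10G/25G device.
 */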
static bool sparx5_dev_change(struct sparx5 *sparx5,
                              struct sparx5_port *port,
                              struct sparx5_port_config *conf)
{
        return sparx5_is_baser(port->conf.portmode) ^
               sparx5_is_baser(conf->portmode);
}

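/* Poll the queue system until all memory and frame references for the port
 * have been released, or time out with an error.
 */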
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
        u32 value, resource, prio, delay_cnt = 0;
        bool poll_src = true;
        char *mem = "";

        /* Resource == 0: Memory tracked per source (SRC-MEM)
         * Resource == 1: Frame references tracked per source (SRC-REF)
         * Resource == 2: Memory tracked per destination (DST-MEM)
         * Resource == 3: Frame references tracked per destination (DST-REF)
         */
        while (1) {
                bool empty = true;

                for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
                        u32 base;

                        base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
                        for (prio = 0; prio < SPX5_PRIOS; prio++) {
                                value = spx5_rd(sparx5,
                                                QRES_RES_STAT(base + prio));
                                if (value) {
                                        mem = resource == 0 ?
                                              "DST-MEM" : "SRC-MEM";
                                        empty = false;
                                }
                        }
                }

                if (empty)
                        break;

                if (delay_cnt++ == 2000) {
                        dev_err(sparx5->dev,
                                "Flush timeout port %u. %s queue not empty\n",
                                portno, mem);
                        return -EINVAL;
                }

                usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
        }
        return 0;
}

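/* Disable and flush a port before it is reconfigured; the numbered comments
 * below mark the individual steps of the flush sequence.
 */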
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
                               bool high_spd_dev)
{
        u32 tinst = high_spd_dev ?
                    sparx5_port_dev_index(port->portno) : port->portno;
        u32 dev = high_spd_dev ?
                  sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
        void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
        u32 spd = port->conf.speed;
        u32 spd_prm;
        int err;

        if (high_spd_dev) {
                /* 1: Reset the PCS Rx clock domain */
                spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
                              DEV10G_DEV_RST_CTRL_PCS_RX_RST,
                              devinst,
                              DEV10G_DEV_RST_CTRL(0));

                /* 2: Disable MAC frame reception */
                spx5_inst_rmw(0,
                              DEV10G_MAC_ENA_CFG_RX_ENA,
                              devinst,
                              DEV10G_MAC_ENA_CFG(0));
        } else {
                /* 1: Reset the PCS Rx clock domain */
                spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
                              DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
                              devinst,
                              DEV2G5_DEV_RST_CTRL(0));
                /* 2: Disable MAC frame reception */
                spx5_inst_rmw(0,
                              DEV2G5_MAC_ENA_CFG_RX_ENA,
                              devinst,
                              DEV2G5_MAC_ENA_CFG(0));
        }
        /* 3: Disable traffic being sent to or from switch port->portno */
        spx5_rmw(0,
                 QFWD_SWITCH_PORT_MODE_PORT_ENA,
                 sparx5,
                 QFWD_SWITCH_PORT_MODE(port->portno));

        /* 4: Disable dequeuing from the egress queues */
        spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
                 HSCH_PORT_MODE_DEQUEUE_DIS,
                 sparx5,
                 HSCH_PORT_MODE(port->portno));

        /* 5: Disable Flowcontrol */
        spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
                 QSYS_PAUSE_CFG_PAUSE_STOP,
                 sparx5,
                 QSYS_PAUSE_CFG(port->portno));

        spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
        /* 6: Wait while the last frame is exiting the queues */
        usleep_range(8 * spd_prm, 10 * spd_prm);

        /* 7: Flush the queues associated with port->portno */
        spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
                 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
                 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
                 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
                 HSCH_FLUSH_CTRL_FLUSH_PORT |
                 HSCH_FLUSH_CTRL_FLUSH_DST |
                 HSCH_FLUSH_CTRL_FLUSH_SRC |
                 HSCH_FLUSH_CTRL_FLUSH_ENA,
                 sparx5,
                 HSCH_FLUSH_CTRL);

        /* 8: Enable dequeuing from the egress queues */
        spx5_rmw(0,
                 HSCH_PORT_MODE_DEQUEUE_DIS,
                 sparx5,
                 HSCH_PORT_MODE(port->portno));

        /* 9: Wait until flushing is complete */
        err = sparx5_port_flush_poll(sparx5, port->portno);
        if (err)
                return err;

        /* 10: Reset the MAC clock domain */
        if (high_spd_dev) {
                spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
                              DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
                              DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
                              DEV10G_DEV_RST_CTRL_PCS_TX_RST |
                              DEV10G_DEV_RST_CTRL_MAC_RX_RST |
                              DEV10G_DEV_RST_CTRL_MAC_TX_RST,
                              devinst,
                              DEV10G_DEV_RST_CTRL(0));

        } else {
                spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
                              DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
                              DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
                              DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
                              DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
                              DEV2G5_DEV_RST_CTRL_SPEED_SEL |
                              DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
                              DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
                              DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
                              DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
                              devinst,
                              DEV2G5_DEV_RST_CTRL(0));
        }
        /* 11: Clear flushing */
        spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
                 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
                 HSCH_FLUSH_CTRL_FLUSH_PORT |
                 HSCH_FLUSH_CTRL_FLUSH_ENA,
                 sparx5,
                 HSCH_FLUSH_CTRL);

        if (high_spd_dev) {
                u32 pcs = sparx5_to_pcs_dev(port->portno);
                void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

                /* 12: Disable 5G/10G/25 BaseR PCS */
                spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
                              PCS10G_BR_PCS_CFG_PCS_ENA,
                              pcsinst,
                              PCS10G_BR_PCS_CFG(0));

                if (sparx5_port_is_25g(port->portno))
                        /* Disable 25G PCS */
                        spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
                                 DEV25G_PCS25G_CFG_PCS25G_ENA,
                                 sparx5,
                                 DEV25G_PCS25G_CFG(tinst));
        } else {
                /* 12: Disable 1G PCS */
                spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
                         DEV2G5_PCS1G_CFG_PCS_ENA,
                         sparx5,
                         DEV2G5_PCS1G_CFG(port->portno));
        }

        /* The port is now flushed and disabled */
        return 0;
}

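/* Calculate the DSM TX stop watermark for a port, based on its speed and the
 * taxi bus distance to the device.
 */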
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
                               u32 portno, u32 speed)
{
        u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
        const u32 taxi_dist[SPX5_PORTS_ALL] = {
                6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
                4, 4, 4, 4,
                11, 12, 13, 14, 15, 16, 17, 18,
                11, 12, 13, 14, 15, 16, 17, 18,
                11, 12, 13, 14, 15, 16, 17, 18,
                11, 12, 13, 14, 15, 16, 17, 18,
                4, 6, 8, 4, 6, 8, 6, 8,
                2, 2, 2, 2, 2, 2, 2, 4, 2
        };
        u32 mac_per = 6400, tmp1, tmp2, tmp3;
        u32 fifo_width = 16;
        u32 mac_width = 8;
        u32 addition = 0;

        switch (speed) {
        case SPEED_25000:
                return 0;
        case SPEED_10000:
                mac_per = 6400;
                mac_width = 8;
                addition = 1;
                break;
        case SPEED_5000:
                mac_per = 12800;
                mac_width = 8;
                addition = 0;
                break;
        case SPEED_2500:
                mac_per = 3200;
                mac_width = 1;
                addition = 0;
                break;
        case SPEED_1000:
                mac_per = 8000;
                mac_width = 1;
                addition = 0;
                break;
        case SPEED_100:
        case SPEED_10:
                return 1;
        default:
                break;
        }

        tmp1 = 1000 * mac_width / fifo_width;
        tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
               * sys_clk / mac_per);
        tmp3 = tmp1 * tmp2 / 1000;
        return (tmp3 + 2000 + 999) / 1000 + addition;
}

/* Configure port muxing:
 * QSGMII: 4x2G5 devices
 */
static int sparx5_port_mux_set(struct sparx5 *sparx5,
                               struct sparx5_port *port,
                               struct sparx5_port_config *conf)
{
        u32 portno = port->portno;
        u32 inst;

        if (port->conf.portmode == conf->portmode)
                return 0; /* Nothing to do */

        switch (conf->portmode) {
        case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
                inst = (portno - portno % 4) / 4;
                spx5_rmw(BIT(inst),
                         BIT(inst),
                         sparx5,
                         PORT_CONF_QSGMII_ENA);

                if ((portno / 4 % 2) == 0) {
                        /* Affects d0-d3,d8-d11..d40-d43 */
                        spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
                                 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
                                 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
                                 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
                                 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
                                 PORT_CONF_USGMII_CFG_QUAD_MODE,
                                 sparx5,
                                 PORT_CONF_USGMII_CFG((portno / 8)));
                }
                break;
        default:
                break;
        }
        return 0;
}

static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
                                    struct sparx5_port *port)
{
        enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
        int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
                     max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
        bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
        enum sparx5_vlan_port_type vlan_type = port->vlan_type;
        bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
        u32 dev = sparx5_to_high_dev(port->portno);
        u32 tinst = sparx5_port_dev_index(port->portno);
        void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
        u32 etype;

        etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
                 port->custom_etype :
                 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
                 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

        spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
                DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
                DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
                DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
                sparx5,
                DEV2G5_MAC_TAGS_CFG(port->portno));

        if (sparx5_port_is_2g5(port->portno))
                return 0;

        spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
                      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
                      DEV10G_MAC_TAGS_CFG_TAG_ID |
                      DEV10G_MAC_TAGS_CFG_TAG_ENA,
                      inst,
                      DEV10G_MAC_TAGS_CFG(0, 0));

        spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
                      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
                      inst,
                      DEV10G_MAC_NUM_TAGS_CFG(0));

        spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
                      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
                      inst,
                      DEV10G_MAC_MAXLEN_CFG(0));
        return 0;
}

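/* Calculate the forwarding urgency used by the queue system, based on the
 * port speed and the core clock period.
 */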
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
        u32 clk_period_ps = 1600; /* 625 MHz for now */
        u32 urg = 672000;

        switch (speed) {
        case SPEED_10:
        case SPEED_100:
        case SPEED_1000:
                urg = 672000;
                break;
        case SPEED_2500:
                urg = 270000;
                break;
        case SPEED_5000:
                urg = 135000;
                break;
        case SPEED_10000:
                urg = 67200;
                break;
        case SPEED_25000:
                urg = 27000;
                break;
        }
        return urg / clk_period_ps - 1;
}

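/* Encode a watermark value: values of 2048 and above are stored in units of 16 */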
static u16 sparx5_wm_enc(u16 value)
{
        if (value >= 2048)
                return 2048 + value / 16;

        return value;
}

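/* Configure flow control: whether to obey received pause frames and when to
 * start generating pause frames.
 */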
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
                                struct sparx5_port *port,
                                struct sparx5_port_config *conf)
{
        bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
        u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

        if (conf->pause & MLO_PAUSE_TX)
                pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
                                                SPX5_BUFFER_CELL_SZ));

        /* Set HDX flowcontrol */
        spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
                 DSM_MAC_CFG_HDX_BACKPREASSURE,
                 sparx5,
                 DSM_MAC_CFG(port->portno));

        /* Obey flowcontrol */
        spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
                 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
                 sparx5,
                 DSM_RX_PAUSE_CFG(port->portno));

        /* Disable forward pressure */
        spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
                 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
                 sparx5,
                 QSYS_FWD_PRESSURE(port->portno));

        /* Generate pause frames */
        spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
                 QSYS_PAUSE_CFG_PAUSE_STOP,
                 sparx5,
                 QSYS_PAUSE_CFG(port->portno));

        return 0;
}

static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
{
        if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
                return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
        else
                return 1; /* Enable SGMII Aneg */
}

int sparx5_serdes_set(struct sparx5 *sparx5,
                      struct sparx5_port *port,
                      struct sparx5_port_config *conf)
{
        int portmode, err, speed = conf->speed;

        if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
            ((port->portno % 4) != 0)) {
                return 0;
        }
        if (sparx5_is_baser(conf->portmode)) {
                if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
                        speed = SPEED_25000;
                else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
                        speed = SPEED_10000;
                else
                        speed = SPEED_5000;
        }

        err = phy_set_media(port->serdes, conf->media);
        if (err)
                return err;
        if (speed > 0) {
                err = phy_set_speed(port->serdes, speed);
                if (err)
                        return err;
        }
        if (conf->serdes_reset) {
                err = phy_reset(port->serdes);
                if (err)
                        return err;
        }

        /* Configure SerDes with port parameters
         * For BaseR, the serdes driver supports 10GBASE-R and speeds
         * 5G/10G/25G
         */
        portmode = conf->portmode;
        if (sparx5_is_baser(conf->portmode))
                portmode = PHY_INTERFACE_MODE_10GBASER;
        err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
        if (err)
                return err;
        conf->serdes_reset = false;
        return err;
}

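/* Configure the 1G PCS: SGMII or 1000BaseX/2500BaseX mode with optional
 * in-band auto-negotiation.
 */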
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
                                   struct sparx5_port *port,
                                   struct sparx5_port_config *conf)
{
        bool sgmii = false, inband_aneg = false;
        int err;

        if (port->conf.inband) {
                if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
                    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
                        inband_aneg = true; /* Cisco-SGMII in-band-aneg */
                else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
                         conf->autoneg)
                        inband_aneg = true; /* Clause-37 in-band-aneg */

                err = sparx5_serdes_set(sparx5, port, conf);
                if (err)
                        return -EINVAL;
        } else {
                sgmii = true; /* Phy is connected to the MAC */
        }

        /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
        spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
                 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
                 sparx5,
                 DEV2G5_PCS1G_MODE_CFG(port->portno));

        /* Enable PCS */
        spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
                sparx5,
                DEV2G5_PCS1G_CFG(port->portno));

        if (inband_aneg) {
                u16 abil = sparx5_get_aneg_word(conf);

                /* Enable in-band aneg */
                spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
                        DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
                        DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
                        DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
                        sparx5,
                        DEV2G5_PCS1G_ANEG_CFG(port->portno));
        } else {
                spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
        }

        /* Take PCS out of reset */
        spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
                 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
                 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
                 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
                 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
                 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
                 sparx5,
                 DEV2G5_DEV_RST_CTRL(port->portno));

        return 0;
}

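/* Configure the 5G/10G/25G (BaseR) PCS and MAC and take the device out of
 * reset.
 */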
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
                                    struct sparx5_port *port,
                                    struct sparx5_port_config *conf)
{
        u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
        u32 pix = sparx5_port_dev_index(port->portno);
        u32 dev = sparx5_to_high_dev(port->portno);
        u32 pcs = sparx5_to_pcs_dev(port->portno);
        void __iomem *devinst;
        void __iomem *pcsinst;
        int err;

        devinst = spx5_inst_get(sparx5, dev, pix);
        pcsinst = spx5_inst_get(sparx5, pcs, pix);

        /* SFI : No in-band-aneg. Speeds 5G/10G/25G */
        err = sparx5_serdes_set(sparx5, port, conf);
        if (err)
                return -EINVAL;
        if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
                /* Enable PCS for 25G device, speed 25G */
                spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
                         DEV25G_PCS25G_CFG_PCS25G_ENA,
                         sparx5,
                         DEV25G_PCS25G_CFG(pix));
        } else {
                /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
                spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
                              PCS10G_BR_PCS_CFG_PCS_ENA,
                              pcsinst,
                              PCS10G_BR_PCS_CFG(0));
        }

        /* Enable 5G/10G/25G MAC module */
        spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
                     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
                     devinst,
                     DEV10G_MAC_ENA_CFG(0));

        /* Take the device out of reset */
        spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
                      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
                      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
                      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
                      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
                      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
                      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
                      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
                      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
                      DEV10G_DEV_RST_CTRL_SPEED_SEL,
                      devinst,
                      DEV10G_DEV_RST_CTRL(0));

        return 0;
}

/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
        int bt_indx = BIT(sparx5_port_dev_index(port));

        if (sparx5_port_is_5g(port)) {
                spx5_rmw(hsd ? 0 : bt_indx,
                         bt_indx,
                         sparx5,
                         PORT_CONF_DEV5G_MODES);
        } else if (sparx5_port_is_10g(port)) {
                spx5_rmw(hsd ? 0 : bt_indx,
                         bt_indx,
                         sparx5,
                         PORT_CONF_DEV10G_MODES);
        } else if (sparx5_port_is_25g(port)) {
                spx5_rmw(hsd ? 0 : bt_indx,
                         bt_indx,
                         sparx5,
                         PORT_CONF_DEV25G_MODES);
        }
}

/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
                                      struct sparx5_port *port,
                                      struct sparx5_port_config *conf)
{
        u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
        bool fdx = conf->duplex == DUPLEX_FULL;
        int spd = conf->speed;

        clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
        gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
        tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
        hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
        hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

        /* GIG/FDX mode */
        spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
                 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
                 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
                 DEV2G5_MAC_MODE_CFG_FDX_ENA,
                 sparx5,
                 DEV2G5_MAC_MODE_CFG(port->portno));

        /* Set MAC IFG Gaps */
        spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
                DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
                DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
                sparx5,
                DEV2G5_MAC_IFG_CFG(port->portno));

        /* Disabling frame aging when in HDX (due to HDX issue) */
        spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
                 HSCH_PORT_MODE_AGE_DIS,
                 sparx5,
                 HSCH_PORT_MODE(port->portno));

        /* Enable MAC module */
        spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
                DEV2G5_MAC_ENA_CFG_TX_ENA,
                sparx5,
                DEV2G5_MAC_ENA_CFG(port->portno));

        /* Select speed and take MAC out of reset */
        spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
                 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
                 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
                 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
                 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
                 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
                 sparx5,
                 DEV2G5_DEV_RST_CTRL(port->portno));

        return 0;
}

int sparx5_port_pcs_set(struct sparx5 *sparx5,
                        struct sparx5_port *port,
                        struct sparx5_port_config *conf)
{
        bool high_speed_dev = sparx5_is_baser(conf->portmode);
        int err;

        if (sparx5_dev_change(sparx5, port, conf)) {
                /* switch device */
                sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

                /* Disable the not-in-use device */
                err = sparx5_port_disable(sparx5, port, !high_speed_dev);
                if (err)
                        return err;
        }
        /* Disable the port before re-configuring */
        err = sparx5_port_disable(sparx5, port, high_speed_dev);
        if (err)
                return -EINVAL;

        if (high_speed_dev)
                err = sparx5_port_pcs_high_set(sparx5, port, conf);
        else
                err = sparx5_port_pcs_low_set(sparx5, port, conf);

        if (err)
                return -EINVAL;

        if (port->conf.inband) {
                /* Enable/disable 1G counters in ASM */
                spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
                         ASM_PORT_CFG_CSC_STAT_DIS,
                         sparx5,
                         ASM_PORT_CFG(port->portno));

                /* Enable/disable 1G counters in DSM */
                spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
                         DSM_BUF_CFG_CSC_STAT_DIS,
                         sparx5,
                         DSM_BUF_CFG(port->portno));
        }

        port->conf = *conf;

        return 0;
}

int sparx5_port_config(struct sparx5 *sparx5,
                       struct sparx5_port *port,
                       struct sparx5_port_config *conf)
{
        bool high_speed_dev = sparx5_is_baser(conf->portmode);
        int err, urgency, stop_wm;

        err = sparx5_port_verify_speed(sparx5, port, conf);
        if (err)
                return err;

        /* high speed device is already configured */
        if (!high_speed_dev)
                sparx5_port_config_low_set(sparx5, port, conf);

        /* Configure flow control */
        err = sparx5_port_fc_setup(sparx5, port, conf);
        if (err)
                return err;

        /* Set the DSM stop watermark */
        stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
        spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
                 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
                 sparx5,
                 DSM_DEV_TX_STOP_WM_CFG(port->portno));

        /* Enable port in queue system */
        urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
        spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
                 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
                 QFWD_SWITCH_PORT_MODE_PORT_ENA |
                 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
                 sparx5,
                 QFWD_SWITCH_PORT_MODE(port->portno));

        /* Save the new values */
        port->conf = *conf;

        return 0;
}

/* Initialize port config to default */
int sparx5_port_init(struct sparx5 *sparx5,
                     struct sparx5_port *port,
                     struct sparx5_port_config *conf)
{
        u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
        u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
        u32 devhigh = sparx5_to_high_dev(port->portno);
        u32 pix = sparx5_port_dev_index(port->portno);
        u32 pcs = sparx5_to_pcs_dev(port->portno);
        bool sd_pol = port->signd_active_high;
        bool sd_sel = !port->signd_internal;
        bool sd_ena = port->signd_enable;
        u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
        void __iomem *devinst;
        void __iomem *pcsinst;
        int err;

        devinst = spx5_inst_get(sparx5, devhigh, pix);
        pcsinst = spx5_inst_get(sparx5, pcs, pix);

        /* Set the mux port mode */
        err = sparx5_port_mux_set(sparx5, port, conf);
        if (err)
                return err;

        /* Configure MAC vlan awareness */
        err = sparx5_port_max_tags_set(sparx5, port);
        if (err)
                return err;

        /* Set Max Length */
        spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
                 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
                 sparx5,
                 DEV2G5_MAC_MAXLEN_CFG(port->portno));

        /* 1G/2G5: Signal Detect configuration */
        spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
                DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
                DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
                sparx5,
                DEV2G5_PCS1G_SD_CFG(port->portno));

        /* Set Pause WM hysteresis */
        spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
                 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
                 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
                 QSYS_PAUSE_CFG_PAUSE_START |
                 QSYS_PAUSE_CFG_PAUSE_STOP |
                 QSYS_PAUSE_CFG_PAUSE_ENA,
                 sparx5,
                 QSYS_PAUSE_CFG(port->portno));

        /* Port ATOP. Frames are tail dropped when this WM is hit */
        spx5_wr(QSYS_ATOP_ATOP_SET(atop),
                sparx5,
                QSYS_ATOP(port->portno));

        /* Discard pause frame 01-80-C2-00-00-01 */
        spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

        /* Discard SMAC multicast */
        spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
                 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
                 sparx5, ANA_CL_FILTER_CTRL(port->portno));

        if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
            conf->portmode == PHY_INTERFACE_MODE_SGMII) {
                err = sparx5_serdes_set(sparx5, port, conf);
                if (err)
                        return err;

                if (!sparx5_port_is_2g5(port->portno))
                        /* Enable shadow device */
                        spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
                                 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
                                 sparx5,
                                 DSM_DEV_TX_STOP_WM_CFG(port->portno));

                sparx5_dev_switch(sparx5, port->portno, false);
        }
        if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
                /* All ports must be PCS enabled in QSGMII mode */
                spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
                         DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
                         sparx5,
                         DEV2G5_DEV_RST_CTRL(port->portno));
        }
        /* Default IFGs for 1G */
        spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
                DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
                DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
                sparx5,
                DEV2G5_MAC_IFG_CFG(port->portno));

        if (sparx5_port_is_2g5(port->portno))
                return 0; /* Low speed device only - return */

        /* Now setup the high speed device */
        if (conf->portmode == PHY_INTERFACE_MODE_NA)
                conf->portmode = PHY_INTERFACE_MODE_10GBASER;

        if (sparx5_is_baser(conf->portmode))
                sparx5_dev_switch(sparx5, port->portno, true);

        /* Set Max Length */
        spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
                      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
                      devinst,
                      DEV10G_MAC_MAXLEN_CFG(0));

        /* Handle Signal Detect in 10G PCS */
        spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
                     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
                     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
                     pcsinst,
                     PCS10G_BR_PCS_SD_CFG(0));

        if (sparx5_port_is_25g(port->portno)) {
                /* Handle Signal Detect in 25G PCS */
                spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
                        DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
                        DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
                        sparx5,
                        DEV25G_PCS25G_SD_CFG(pix));
        }

        return 0;
}

void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
        struct sparx5 *sparx5 = port->sparx5;

        /* Enable port for frame transfer? */
        spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
                 QFWD_SWITCH_PORT_MODE_PORT_ENA,
                 sparx5,
                 QFWD_SWITCH_PORT_MODE(port->portno));
}

int sparx5_port_qos_set(struct sparx5_port *port,
                        struct sparx5_port_qos *qos)
{
        sparx5_port_qos_dscp_set(port, &qos->dscp);
        sparx5_port_qos_pcp_set(port, &qos->pcp);
        sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
        sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
        sparx5_port_qos_default_set(port, qos);

        return 0;
}

int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
                                 struct sparx5_port_qos_pcp_rewr *qos)
{
        int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
        struct sparx5 *sparx5 = port->sparx5;
        u8 pcp, dei;

        /* Use mapping table, with classified QoS as index, to map QoS and DP
         * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
         * PCP. Classified PCP equals frame PCP.
         */
        if (qos->enable)
                mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

        spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
                 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
                 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
                 port->sparx5, REW_TAG_CTRL(port->portno));

        for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
                /* Extract PCP and DEI */
                pcp = qos->map.map[i];
                if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
                        dei = 1;
                else
                        dei = 0;

                /* Rewrite PCP and DEI, for each classified QoS class and DP
                 * level. This table is only used if tag ctrl mode is set to
                 * 'mapped'.
                 *
                 * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
                 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
                 */
                if (dei) {
                        spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
                                 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
                                 REW_PCP_MAP_DE1(port->portno, i));

                        spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
                                 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
                                 REW_DEI_MAP_DE1(port->portno, i));
                } else {
                        spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
                                 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
                                 REW_PCP_MAP_DE0(port->portno, i));

                        spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
                                 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
                                 REW_DEI_MAP_DE0(port->portno, i));
                }
        }

        return 0;
}

int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
                            struct sparx5_port_qos_pcp *qos)
{
        struct sparx5 *sparx5 = port->sparx5;
        u8 *pcp_itr = qos->map.map;
        u8 pcp, dp;
        int i;

        /* Enable/disable pcp and dp for qos classification. */
        spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
                 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
                 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
                 sparx5, ANA_CL_QOS_CFG(port->portno));

        /* Map each pcp and dei value to priority and dp */
        for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
                pcp = *(pcp_itr + i);
                dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
                spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
                         ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
                         ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
                         ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
                         ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
        }

        return 0;
}

void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
                                        int mode)
{
        spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
                 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
                 ANA_CL_QOS_CFG(port->portno));
}

int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
                                  struct sparx5_port_qos_dscp_rewr *qos)
{
        struct sparx5 *sparx5 = port->sparx5;
        bool rewr = false;
        u16 dscp;
        int i;

        /* On egress, rewrite DSCP value to either classified DSCP or frame
         * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
         */
        if (qos->enable)
                rewr = true;

        spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
                 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
                 REW_DSCP_MAP(port->portno));

        /* On ingress, map each classified QoS class and DP to classified DSCP
         * value. This mapping table is global for all ports.
         */
        for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
                dscp = qos->map.map[i];
                spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
                         ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
                         ANA_CL_QOS_MAP_CFG(i));
        }

        return 0;
}

int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
                             struct sparx5_port_qos_dscp *qos)
{
        struct sparx5 *sparx5 = port->sparx5;
        u8 *dscp = qos->map.map;
        int i;

        /* Enable/disable dscp and dp for qos classification.
         * Disable rewrite of dscp values for now.
         */
        spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
                 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
                 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
                 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
                 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
                 ANA_CL_QOS_CFG(port->portno));

        /* Map each dscp value to priority and dp */
        for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
                spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
                         ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
                         ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
                         ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
                         ANA_CL_DSCP_CFG(i));
        }

        /* Set per-dscp trust */
        for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
                if (qos->qos_enable) {
                        spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
                                 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
                                 ANA_CL_DSCP_CFG(i));
                }
        }

        return 0;
}

int sparx5_port_qos_default_set(const struct sparx5_port *port,
                                const struct sparx5_port_qos *qos)
{
        struct sparx5 *sparx5 = port->sparx5;

        /* Set default prio and dp level */
        spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
                 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
                 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
                 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
                 sparx5, ANA_CL_QOS_CFG(port->portno));

        /* Set default pcp and dei for untagged frames */
        spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
                 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
                 ANA_CL_VLAN_CTRL_PORT_PCP |
                 ANA_CL_VLAN_CTRL_PORT_DEI,
                 sparx5, ANA_CL_VLAN_CTRL(port->portno));

        return 0;
}
1// SPDX-License-Identifier: GPL-2.0+
2/* Microchip Sparx5 Switch driver
3 *
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5 */
6
7#include <linux/module.h>
8#include <linux/phy/phy.h>
9
10#include "sparx5_main_regs.h"
11#include "sparx5_main.h"
12#include "sparx5_port.h"
13
14#define SPX5_ETYPE_TAG_C 0x8100
15#define SPX5_ETYPE_TAG_S 0x88a8
16
17#define SPX5_WAIT_US 1000
18#define SPX5_WAIT_MAX_US 2000
19
20enum port_error {
21 SPX5_PERR_SPEED,
22 SPX5_PERR_IFTYPE,
23};
24
25#define PAUSE_DISCARD 0xC
26#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
27
28static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
29{
30 status->an_complete = true;
31 if (!(lp_abil & LPA_SGMII_LINK)) {
32 status->link = false;
33 return;
34 }
35
36 switch (lp_abil & LPA_SGMII_SPD_MASK) {
37 case LPA_SGMII_10:
38 status->speed = SPEED_10;
39 break;
40 case LPA_SGMII_100:
41 status->speed = SPEED_100;
42 break;
43 case LPA_SGMII_1000:
44 status->speed = SPEED_1000;
45 break;
46 default:
47 status->link = false;
48 return;
49 }
50 if (lp_abil & LPA_SGMII_FULL_DUPLEX)
51 status->duplex = DUPLEX_FULL;
52 else
53 status->duplex = DUPLEX_HALF;
54}
55
56static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
57{
58 status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
59 status->an_complete = true;
60 status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
61 DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported
62
63 if ((ld_abil & ADVERTISE_1000XPAUSE) &&
64 (lp_abil & ADVERTISE_1000XPAUSE)) {
65 status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
66 } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
67 (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
68 status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
69 MLO_PAUSE_TX : 0;
70 status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
71 MLO_PAUSE_RX : 0;
72 } else {
73 status->pause = MLO_PAUSE_NONE;
74 }
75}
76
77static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
78 struct sparx5_port *port,
79 struct sparx5_port_status *status)
80{
81 u32 portno = port->portno;
82 u16 lp_adv, ld_adv;
83 u32 value;
84
85 /* Get PCS Link down sticky */
86 value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
87 status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
88 if (status->link_down) /* Clear the sticky */
89 spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
90
91 /* Get both current Link and Sync status */
92 value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
93 status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
94 DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
95
96 if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
97 status->speed = SPEED_1000;
98 else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
99 status->speed = SPEED_2500;
100
101 status->duplex = DUPLEX_FULL;
102
103 /* Get PCS ANEG status register */
104 value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
105
106 /* Aneg complete provides more information */
107 if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
108 lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
109 if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
110 decode_sgmii_word(lp_adv, status);
111 } else {
112 value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
113 ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
114 decode_cl37_word(lp_adv, ld_adv, status);
115 }
116 }
117 return 0;
118}
119
120static int sparx5_get_sfi_status(struct sparx5 *sparx5,
121 struct sparx5_port *port,
122 struct sparx5_port_status *status)
123{
124 bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
125 u32 portno = port->portno;
126 u32 value, dev, tinst;
127 void __iomem *inst;
128
129 if (!high_speed_dev) {
130 netdev_err(port->ndev, "error: low speed and SFI mode\n");
131 return -EINVAL;
132 }
133
134 dev = sparx5_to_high_dev(portno);
135 tinst = sparx5_port_dev_index(portno);
136 inst = spx5_inst_get(sparx5, dev, tinst);
137
138 value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
139 if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
140 /* The link is or has been down. Clear the sticky bit */
141 status->link_down = 1;
142 spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
143 value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
144 }
145 status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
146 status->duplex = DUPLEX_FULL;
147 if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
148 status->speed = SPEED_5000;
149 else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
150 status->speed = SPEED_10000;
151 else
152 status->speed = SPEED_25000;
153
154 return 0;
155}
156
157/* Get link status of 1000Base-X/in-band and SFI ports.
158 */
159int sparx5_get_port_status(struct sparx5 *sparx5,
160 struct sparx5_port *port,
161 struct sparx5_port_status *status)
162{
163 memset(status, 0, sizeof(*status));
164 status->speed = port->conf.speed;
165 if (port->conf.power_down) {
166 status->link = false;
167 return 0;
168 }
169 switch (port->conf.portmode) {
170 case PHY_INTERFACE_MODE_SGMII:
171 case PHY_INTERFACE_MODE_QSGMII:
172 case PHY_INTERFACE_MODE_1000BASEX:
173 case PHY_INTERFACE_MODE_2500BASEX:
174 return sparx5_get_dev2g5_status(sparx5, port, status);
175 case PHY_INTERFACE_MODE_5GBASER:
176 case PHY_INTERFACE_MODE_10GBASER:
177 case PHY_INTERFACE_MODE_25GBASER:
178 return sparx5_get_sfi_status(sparx5, port, status);
179 case PHY_INTERFACE_MODE_NA:
180 return 0;
181 default:
182 netdev_err(port->ndev, "Status not supported");
183 return -ENODEV;
184 }
185 return 0;
186}
187
188static int sparx5_port_error(struct sparx5_port *port,
189 struct sparx5_port_config *conf,
190 enum port_error errtype)
191{
192 switch (errtype) {
193 case SPX5_PERR_SPEED:
194 netdev_err(port->ndev,
195 "Interface does not support speed: %u: for %s\n",
196 conf->speed, phy_modes(conf->portmode));
197 break;
198 case SPX5_PERR_IFTYPE:
199 netdev_err(port->ndev,
200 "Switch port does not support interface type: %s\n",
201 phy_modes(conf->portmode));
202 break;
203 default:
204 netdev_err(port->ndev,
205 "Interface configuration error\n");
206 }
207
208 return -EINVAL;
209}
210
211static int sparx5_port_verify_speed(struct sparx5 *sparx5,
212 struct sparx5_port *port,
213 struct sparx5_port_config *conf)
214{
215 if ((sparx5_port_is_2g5(port->portno) &&
216 conf->speed > SPEED_2500) ||
217 (sparx5_port_is_5g(port->portno) &&
218 conf->speed > SPEED_5000) ||
219 (sparx5_port_is_10g(port->portno) &&
220 conf->speed > SPEED_10000))
221 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
222
223 switch (conf->portmode) {
224 case PHY_INTERFACE_MODE_NA:
225 return -EINVAL;
226 case PHY_INTERFACE_MODE_1000BASEX:
227 if (conf->speed != SPEED_1000 ||
228 sparx5_port_is_2g5(port->portno))
229 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
230 if (sparx5_port_is_2g5(port->portno))
231 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
232 break;
233 case PHY_INTERFACE_MODE_2500BASEX:
234 if (conf->speed != SPEED_2500 ||
235 sparx5_port_is_2g5(port->portno))
236 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
237 break;
238 case PHY_INTERFACE_MODE_QSGMII:
239 if (port->portno > 47)
240 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
241 fallthrough;
242 case PHY_INTERFACE_MODE_SGMII:
243 if (conf->speed != SPEED_1000 &&
244 conf->speed != SPEED_100 &&
245 conf->speed != SPEED_10 &&
246 conf->speed != SPEED_2500)
247 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
248 break;
249 case PHY_INTERFACE_MODE_5GBASER:
250 case PHY_INTERFACE_MODE_10GBASER:
251 case PHY_INTERFACE_MODE_25GBASER:
252 if ((conf->speed != SPEED_5000 &&
253 conf->speed != SPEED_10000 &&
254 conf->speed != SPEED_25000))
255 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
256 break;
257 default:
258 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
259 }
260 return 0;
261}
262
263static bool sparx5_dev_change(struct sparx5 *sparx5,
264 struct sparx5_port *port,
265 struct sparx5_port_config *conf)
266{
267 return sparx5_is_baser(port->conf.portmode) ^
268 sparx5_is_baser(conf->portmode);
269}
270
271static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
272{
273 u32 value, resource, prio, delay_cnt = 0;
274 bool poll_src = true;
275 char *mem = "";
276
277 /* Resource == 0: Memory tracked per source (SRC-MEM)
278 * Resource == 1: Frame references tracked per source (SRC-REF)
279 * Resource == 2: Memory tracked per destination (DST-MEM)
280 * Resource == 3: Frame references tracked per destination. (DST-REF)
281 */
282 while (1) {
283 bool empty = true;
284
285 for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
286 u32 base;
287
288 base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
289 for (prio = 0; prio < SPX5_PRIOS; prio++) {
290 value = spx5_rd(sparx5,
291 QRES_RES_STAT(base + prio));
292 if (value) {
293 mem = resource == 0 ?
294 "DST-MEM" : "SRC-MEM";
295 empty = false;
296 }
297 }
298 }
299
300 if (empty)
301 break;
302
303 if (delay_cnt++ == 2000) {
304 dev_err(sparx5->dev,
305 "Flush timeout port %u. %s queue not empty\n",
306 portno, mem);
307 return -EINVAL;
308 }
309
310 usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
311 }
312 return 0;
313}
314
315static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
316{
317 u32 tinst = high_spd_dev ?
318 sparx5_port_dev_index(port->portno) : port->portno;
319 u32 dev = high_spd_dev ?
320 sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
321 void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
322 u32 spd = port->conf.speed;
323 u32 spd_prm;
324 int err;
325
326 if (high_spd_dev) {
327 /* 1: Reset the PCS Rx clock domain */
328 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
329 DEV10G_DEV_RST_CTRL_PCS_RX_RST,
330 devinst,
331 DEV10G_DEV_RST_CTRL(0));
332
333 /* 2: Disable MAC frame reception */
334 spx5_inst_rmw(0,
335 DEV10G_MAC_ENA_CFG_RX_ENA,
336 devinst,
337 DEV10G_MAC_ENA_CFG(0));
338 } else {
339 /* 1: Reset the PCS Rx clock domain */
340 spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
341 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
342 devinst,
343 DEV2G5_DEV_RST_CTRL(0));
344 /* 2: Disable MAC frame reception */
345 spx5_inst_rmw(0,
346 DEV2G5_MAC_ENA_CFG_RX_ENA,
347 devinst,
348 DEV2G5_MAC_ENA_CFG(0));
349 }
350 /* 3: Disable traffic being sent to or from switch port->portno */
351 spx5_rmw(0,
352 QFWD_SWITCH_PORT_MODE_PORT_ENA,
353 sparx5,
354 QFWD_SWITCH_PORT_MODE(port->portno));
355
356 /* 4: Disable dequeuing from the egress queues */
357 spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
358 HSCH_PORT_MODE_DEQUEUE_DIS,
359 sparx5,
360 HSCH_PORT_MODE(port->portno));
361
362 /* 5: Disable Flowcontrol */
363 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
364 QSYS_PAUSE_CFG_PAUSE_STOP,
365 sparx5,
366 QSYS_PAUSE_CFG(port->portno));
367
368 spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
369 /* 6: Wait while the last frame is exiting the queues */
370 usleep_range(8 * spd_prm, 10 * spd_prm);
371
372 /* 7: Flush the queues accociated with the port->portno */
373 spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
374 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
375 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
376 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
377 HSCH_FLUSH_CTRL_FLUSH_PORT |
378 HSCH_FLUSH_CTRL_FLUSH_DST |
379 HSCH_FLUSH_CTRL_FLUSH_SRC |
380 HSCH_FLUSH_CTRL_FLUSH_ENA,
381 sparx5,
382 HSCH_FLUSH_CTRL);
383
384 /* 8: Enable dequeuing from the egress queues */
385 spx5_rmw(0,
386 HSCH_PORT_MODE_DEQUEUE_DIS,
387 sparx5,
388 HSCH_PORT_MODE(port->portno));
389
390 /* 9: Wait until flushing is complete */
391 err = sparx5_port_flush_poll(sparx5, port->portno);
392 if (err)
393 return err;
394
395 /* 10: Reset the MAC clock domain */
396 if (high_spd_dev) {
397 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
398 DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
399 DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
400 DEV10G_DEV_RST_CTRL_PCS_TX_RST |
401 DEV10G_DEV_RST_CTRL_MAC_RX_RST |
402 DEV10G_DEV_RST_CTRL_MAC_TX_RST,
403 devinst,
404 DEV10G_DEV_RST_CTRL(0));
405
406 } else {
407 spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
408 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
409 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
410 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
411 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
412 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
413 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
414 DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
415 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
416 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
417 devinst,
418 DEV2G5_DEV_RST_CTRL(0));
419 }
420 /* 11: Clear flushing */
421 spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
422 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
423 HSCH_FLUSH_CTRL_FLUSH_PORT |
424 HSCH_FLUSH_CTRL_FLUSH_ENA,
425 sparx5,
426 HSCH_FLUSH_CTRL);
427
428 if (high_spd_dev) {
429 u32 pcs = sparx5_to_pcs_dev(port->portno);
430 void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
431
432 /* 12: Disable 5G/10G/25 BaseR PCS */
433 spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
434 PCS10G_BR_PCS_CFG_PCS_ENA,
435 pcsinst,
436 PCS10G_BR_PCS_CFG(0));
437
438 if (sparx5_port_is_25g(port->portno))
439 /* Disable 25G PCS */
440 spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
441 DEV25G_PCS25G_CFG_PCS25G_ENA,
442 sparx5,
443 DEV25G_PCS25G_CFG(tinst));
444 } else {
445 /* 12: Disable 1G PCS */
446 spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
447 DEV2G5_PCS1G_CFG_PCS_ENA,
448 sparx5,
449 DEV2G5_PCS1G_CFG(port->portno));
450 }
451
452 /* The port is now flushed and disabled */
453 return 0;
454}
455
456static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
457 u32 portno, u32 speed)
458{
459 u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
460 const u32 taxi_dist[SPX5_PORTS_ALL] = {
461 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
462 4, 4, 4, 4,
463 11, 12, 13, 14, 15, 16, 17, 18,
464 11, 12, 13, 14, 15, 16, 17, 18,
465 11, 12, 13, 14, 15, 16, 17, 18,
466 11, 12, 13, 14, 15, 16, 17, 18,
467 4, 6, 8, 4, 6, 8, 6, 8,
468 2, 2, 2, 2, 2, 2, 2, 4, 2
469 };
470 u32 mac_per = 6400, tmp1, tmp2, tmp3;
471 u32 fifo_width = 16;
472 u32 mac_width = 8;
473 u32 addition = 0;
474
475 switch (speed) {
476 case SPEED_25000:
477 return 0;
478 case SPEED_10000:
479 mac_per = 6400;
480 mac_width = 8;
481 addition = 1;
482 break;
483 case SPEED_5000:
484 mac_per = 12800;
485 mac_width = 8;
486 addition = 0;
487 break;
488 case SPEED_2500:
489 mac_per = 3200;
490 mac_width = 1;
491 addition = 0;
492 break;
493 case SPEED_1000:
494 mac_per = 8000;
495 mac_width = 1;
496 addition = 0;
497 break;
498 case SPEED_100:
499 case SPEED_10:
500 return 1;
501 default:
502 break;
503 }
504
505 tmp1 = 1000 * mac_width / fifo_width;
506 tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
507 * sys_clk / mac_per);
508 tmp3 = tmp1 * tmp2 / 1000;
509 return (tmp3 + 2000 + 999) / 1000 + addition;
510}
511
512/* Configure port muxing:
513 * QSGMII: 4x2G5 devices
514 */
515static int sparx5_port_mux_set(struct sparx5 *sparx5,
516 struct sparx5_port *port,
517 struct sparx5_port_config *conf)
518{
519 u32 portno = port->portno;
520 u32 inst;
521
522 if (port->conf.portmode == conf->portmode)
523 return 0; /* Nothing to do */
524
525 switch (conf->portmode) {
526 case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
527 inst = (portno - portno % 4) / 4;
528 spx5_rmw(BIT(inst),
529 BIT(inst),
530 sparx5,
531 PORT_CONF_QSGMII_ENA);
532
533 if ((portno / 4 % 2) == 0) {
534 /* Affects d0-d3,d8-d11..d40-d43 */
535 spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
536 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
537 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
538 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
539 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
540 PORT_CONF_USGMII_CFG_QUAD_MODE,
541 sparx5,
542 PORT_CONF_USGMII_CFG((portno / 8)));
543 }
544 break;
545 default:
546 break;
547 }
548 return 0;
549}
550
551static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
552 struct sparx5_port *port)
553{
554 enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
555 int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
556 max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
557 bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
558 enum sparx5_vlan_port_type vlan_type = port->vlan_type;
559 bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
560 u32 dev = sparx5_to_high_dev(port->portno);
561 u32 tinst = sparx5_port_dev_index(port->portno);
562 void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
563 u32 etype;
564
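/* Select the tag ethertype written to the MAC: the port's custom
 * ethertype for S-custom ports, SPX5_ETYPE_TAG_C for C ports and
 * SPX5_ETYPE_TAG_S otherwise.
 */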
565 etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
566 port->custom_etype :
567 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
568 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
569
570 spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
571 DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
572 DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
573 DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
574 sparx5,
575 DEV2G5_MAC_TAGS_CFG(port->portno));
576
577 if (sparx5_port_is_2g5(port->portno))
578 return 0;
579
580 spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
581 DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
582 DEV10G_MAC_TAGS_CFG_TAG_ID |
583 DEV10G_MAC_TAGS_CFG_TAG_ENA,
584 inst,
585 DEV10G_MAC_TAGS_CFG(0, 0));
586
587 spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
588 DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
589 inst,
590 DEV10G_MAC_NUM_TAGS_CFG(0));
591
592 spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
593 DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
594 inst,
595 DEV10G_MAC_MAXLEN_CFG(0));
596 return 0;
597}
598
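/* Resolve the forwarding urgency for QFWD_SWITCH_PORT_MODE: a speed
 * dependent time budget in picoseconds converted to core clock cycles,
 * minus one.
 */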
599static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
600{
601 u32 clk_period_ps = 1600; /* 625 MHz for now */
602 u32 urg = 672000;
603
604 switch (speed) {
605 case SPEED_10:
606 case SPEED_100:
607 case SPEED_1000:
608 urg = 672000;
609 break;
610 case SPEED_2500:
611 urg = 270000;
612 break;
613 case SPEED_5000:
614 urg = 135000;
615 break;
616 case SPEED_10000:
617 urg = 67200;
618 break;
619 case SPEED_25000:
620 urg = 27000;
621 break;
622 }
623 return urg / clk_period_ps - 1;
624}
625
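/* Encode a watermark value: 2048 and above are encoded as a multiple of
 * 16 with an offset of 2048; smaller values are used as-is.
 */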
626static u16 sparx5_wm_enc(u16 value)
627{
628 if (value >= 2048)
629 return 2048 + value / 16;
630
631 return value;
632}
633
634static int sparx5_port_fc_setup(struct sparx5 *sparx5,
635 struct sparx5_port *port,
636 struct sparx5_port_config *conf)
637{
638 bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
639 u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
640
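/* With TX pause enabled, program a pause-stop watermark of four
 * max-length frames, expressed in buffer cells and watermark encoded;
 * otherwise leave it at the "generation disabled" maximum.
 */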
641 if (conf->pause & MLO_PAUSE_TX)
642 pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
643 SPX5_BUFFER_CELL_SZ));
644
645 /* Set HDX flowcontrol */
646 spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
647 DSM_MAC_CFG_HDX_BACKPREASSURE,
648 sparx5,
649 DSM_MAC_CFG(port->portno));
650
651 /* Obey flowcontrol */
652 spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
653 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
654 sparx5,
655 DSM_RX_PAUSE_CFG(port->portno));
656
657 /* Disable forward pressure */
658 spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
659 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
660 sparx5,
661 QSYS_FWD_PRESSURE(port->portno));
662
663 /* Generate pause frames */
664 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
665 QSYS_PAUSE_CFG_PAUSE_STOP,
666 sparx5,
667 QSYS_PAUSE_CFG(port->portno));
668
669 return 0;
670}
671
672static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
673{
674 if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
675 return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
676 else
677 return 1; /* Enable SGMII Aneg */
678}
679
680int sparx5_serdes_set(struct sparx5 *sparx5,
681 struct sparx5_port *port,
682 struct sparx5_port_config *conf)
683{
684 int portmode, err, speed = conf->speed;
685
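/* In QSGMII mode four ports share one SerDes; only the first port of
 * the group configures it.
 */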
686 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
687 ((port->portno % 4) != 0)) {
688 return 0;
689 }
690 if (sparx5_is_baser(conf->portmode)) {
691 if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
692 speed = SPEED_25000;
693 else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
694 speed = SPEED_10000;
695 else
696 speed = SPEED_5000;
697 }
698
699 err = phy_set_media(port->serdes, conf->media);
700 if (err)
701 return err;
702 if (speed > 0) {
703 err = phy_set_speed(port->serdes, speed);
704 if (err)
705 return err;
706 }
707 if (conf->serdes_reset) {
708 err = phy_reset(port->serdes);
709 if (err)
710 return err;
711 }
712
713 /* Configure SerDes with port parameters
714 * For BaseR, the SerDes driver supports 10GBASE-R and speeds of 5G/10G/25G
715 */
716 portmode = conf->portmode;
717 if (sparx5_is_baser(conf->portmode))
718 portmode = PHY_INTERFACE_MODE_10GBASER;
719 err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
720 if (err)
721 return err;
722 conf->serdes_reset = false;
723 return err;
724}
725
726static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
727 struct sparx5_port *port,
728 struct sparx5_port_config *conf)
729{
730 bool sgmii = false, inband_aneg = false;
731 int err;
732
733 if (port->conf.inband) {
734 if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
735 conf->portmode == PHY_INTERFACE_MODE_QSGMII)
736 inband_aneg = true; /* Cisco-SGMII in-band-aneg */
737 else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
738 conf->autoneg)
739 inband_aneg = true; /* Clause-37 in-band-aneg */
740
741 err = sparx5_serdes_set(sparx5, port, conf);
742 if (err)
743 return -EINVAL;
744 } else {
745 sgmii = true; /* The PHY is connected to the MAC */
746 }
747
748 /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
749 spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
750 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
751 sparx5,
752 DEV2G5_PCS1G_MODE_CFG(port->portno));
753
754 /* Enable PCS */
755 spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
756 sparx5,
757 DEV2G5_PCS1G_CFG(port->portno));
758
759 if (inband_aneg) {
760 u16 abil = sparx5_get_aneg_word(conf);
761
762 /* Enable in-band aneg */
763 spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
764 DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
765 DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
766 DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
767 sparx5,
768 DEV2G5_PCS1G_ANEG_CFG(port->portno));
769 } else {
770 spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
771 }
772
773 /* Take PCS out of reset */
774 spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
775 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
776 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
777 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
778 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
779 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
780 sparx5,
781 DEV2G5_DEV_RST_CTRL(port->portno));
782
783 return 0;
784}
785
786static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
787 struct sparx5_port *port,
788 struct sparx5_port_config *conf)
789{
790 u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
791 u32 pix = sparx5_port_dev_index(port->portno);
792 u32 dev = sparx5_to_high_dev(port->portno);
793 u32 pcs = sparx5_to_pcs_dev(port->portno);
794 void __iomem *devinst;
795 void __iomem *pcsinst;
796 int err;
797
798 devinst = spx5_inst_get(sparx5, dev, pix);
799 pcsinst = spx5_inst_get(sparx5, pcs, pix);
800
801 /* SFI: No in-band-aneg. Speeds 5G/10G/25G */
802 err = sparx5_serdes_set(sparx5, port, conf);
803 if (err)
804 return -EINVAL;
805 if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
806 /* Enable PCS for 25G device, speed 25G */
807 spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
808 DEV25G_PCS25G_CFG_PCS25G_ENA,
809 sparx5,
810 DEV25G_PCS25G_CFG(pix));
811 } else {
812 /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
813 spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
814 PCS10G_BR_PCS_CFG_PCS_ENA,
815 pcsinst,
816 PCS10G_BR_PCS_CFG(0));
817 }
818
819 /* Enable 5G/10G/25G MAC module */
820 spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
821 DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
822 devinst,
823 DEV10G_MAC_ENA_CFG(0));
824
825 /* Take the device out of reset */
826 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
827 DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
828 DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
829 DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
830 DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
831 DEV10G_DEV_RST_CTRL_PCS_RX_RST |
832 DEV10G_DEV_RST_CTRL_PCS_TX_RST |
833 DEV10G_DEV_RST_CTRL_MAC_RX_RST |
834 DEV10G_DEV_RST_CTRL_MAC_TX_RST |
835 DEV10G_DEV_RST_CTRL_SPEED_SEL,
836 devinst,
837 DEV10G_DEV_RST_CTRL(0));
838
839 return 0;
840}
841
842/* Switch a port between its 1G/2.5G device and its 5G/10G/25G device */
843static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
844{
845 int bt_indx = BIT(sparx5_port_dev_index(port));
846
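/* Clear the per-device mode bit to use the 5G/10G/25G device; set it to
 * fall back to the port's shadow 2G5 device.
 */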
847 if (sparx5_port_is_5g(port)) {
848 spx5_rmw(hsd ? 0 : bt_indx,
849 bt_indx,
850 sparx5,
851 PORT_CONF_DEV5G_MODES);
852 } else if (sparx5_port_is_10g(port)) {
853 spx5_rmw(hsd ? 0 : bt_indx,
854 bt_indx,
855 sparx5,
856 PORT_CONF_DEV10G_MODES);
857 } else if (sparx5_port_is_25g(port)) {
858 spx5_rmw(hsd ? 0 : bt_indx,
859 bt_indx,
860 sparx5,
861 PORT_CONF_DEV25G_MODES);
862 }
863}
864
865/* Configure speed/duplex dependent registers */
866static int sparx5_port_config_low_set(struct sparx5 *sparx5,
867 struct sparx5_port *port,
868 struct sparx5_port_config *conf)
869{
870 u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
871 bool fdx = conf->duplex == DUPLEX_FULL;
872 int spd = conf->speed;
873
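/* Resolve the DEV_RST_CTRL speed selection (0: 10M, 1: 100M, 2: 1G/2.5G)
 * and the MAC inter-frame gaps for the given speed and duplex.
 */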
874 clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
875 gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
876 tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
877 hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
878 hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
879
880 /* GIG/FDX mode */
881 spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
882 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
883 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
884 DEV2G5_MAC_MODE_CFG_FDX_ENA,
885 sparx5,
886 DEV2G5_MAC_MODE_CFG(port->portno));
887
888 /* Set MAC IFG Gaps */
889 spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
890 DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
891 DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
892 sparx5,
893 DEV2G5_MAC_IFG_CFG(port->portno));
894
895 /* Disable frame aging when in HDX (due to an HDX issue) */
896 spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
897 HSCH_PORT_MODE_AGE_DIS,
898 sparx5,
899 HSCH_PORT_MODE(port->portno));
900
901 /* Enable MAC module */
902 spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
903 DEV2G5_MAC_ENA_CFG_TX_ENA,
904 sparx5,
905 DEV2G5_MAC_ENA_CFG(port->portno));
906
907 /* Select speed and take MAC out of reset */
908 spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
909 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
910 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
911 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
912 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
913 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
914 sparx5,
915 DEV2G5_DEV_RST_CTRL(port->portno));
916
917 return 0;
918}
919
920int sparx5_port_pcs_set(struct sparx5 *sparx5,
921 struct sparx5_port *port,
922 struct sparx5_port_config *conf)
924{
925 bool high_speed_dev = sparx5_is_baser(conf->portmode);
926 int err;
927
928 if (sparx5_dev_change(sparx5, port, conf)) {
929 /* switch device */
930 sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
931
932 /* Disable the not-in-use device */
933 err = sparx5_port_disable(sparx5, port, !high_speed_dev);
934 if (err)
935 return err;
936 }
937 /* Disable the port before re-configuring */
938 err = sparx5_port_disable(sparx5, port, high_speed_dev);
939 if (err)
940 return -EINVAL;
941
942 if (high_speed_dev)
943 err = sparx5_port_pcs_high_set(sparx5, port, conf);
944 else
945 err = sparx5_port_pcs_low_set(sparx5, port, conf);
946
947 if (err)
948 return -EINVAL;
949
950 if (port->conf.inband) {
951 /* Enable/disable 1G counters in ASM */
952 spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
953 ASM_PORT_CFG_CSC_STAT_DIS,
954 sparx5,
955 ASM_PORT_CFG(port->portno));
956
957 /* Enable/disable 1G counters in DSM */
958 spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
959 DSM_BUF_CFG_CSC_STAT_DIS,
960 sparx5,
961 DSM_BUF_CFG(port->portno));
962 }
963
964 port->conf = *conf;
965
966 return 0;
967}
968
969int sparx5_port_config(struct sparx5 *sparx5,
970 struct sparx5_port *port,
971 struct sparx5_port_config *conf)
972{
973 bool high_speed_dev = sparx5_is_baser(conf->portmode);
974 int err, urgency, stop_wm;
975
976 err = sparx5_port_verify_speed(sparx5, port, conf);
977 if (err)
978 return err;
979
980 /* high speed device is already configured */
981 if (!high_speed_dev)
982 sparx5_port_config_low_set(sparx5, port, conf);
983
984 /* Configure flow control */
985 err = sparx5_port_fc_setup(sparx5, port, conf);
986 if (err)
987 return err;
988
989 /* Set the DSM stop watermark */
990 stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
991 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
992 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
993 sparx5,
994 DSM_DEV_TX_STOP_WM_CFG(port->portno));
995
996 /* Enable port in queue system */
997 urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
998 spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
999 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
1000 QFWD_SWITCH_PORT_MODE_PORT_ENA |
1001 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
1002 sparx5,
1003 QFWD_SWITCH_PORT_MODE(port->portno));
1004
1005 /* Save the new values */
1006 port->conf = *conf;
1007
1008 return 0;
1009}
1010
1011/* Initialize port config to default */
1012int sparx5_port_init(struct sparx5 *sparx5,
1013 struct sparx5_port *port,
1014 struct sparx5_port_config *conf)
1015{
1016 u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1017 u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1018 u32 devhigh = sparx5_to_high_dev(port->portno);
1019 u32 pix = sparx5_port_dev_index(port->portno);
1020 u32 pcs = sparx5_to_pcs_dev(port->portno);
1021 bool sd_pol = port->signd_active_high;
1022 bool sd_sel = !port->signd_internal;
1023 bool sd_ena = port->signd_enable;
1024 u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
1025 void __iomem *devinst;
1026 void __iomem *pcsinst;
1027 int err;
1028
1029 devinst = spx5_inst_get(sparx5, devhigh, pix);
1030 pcsinst = spx5_inst_get(sparx5, pcs, pix);
1031
1032 /* Set the mux port mode */
1033 err = sparx5_port_mux_set(sparx5, port, conf);
1034 if (err)
1035 return err;
1036
1037 /* Configure MAC VLAN awareness */
1038 err = sparx5_port_max_tags_set(sparx5, port);
1039 if (err)
1040 return err;
1041
1042 /* Set Max Length */
1043 spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1044 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
1045 sparx5,
1046 DEV2G5_MAC_MAXLEN_CFG(port->portno));
1047
1048 /* 1G/2G5: Signal Detect configuration */
1049 spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
1050 DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
1051 DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
1052 sparx5,
1053 DEV2G5_PCS1G_SD_CFG(port->portno));
1054
1055 /* Set Pause WM hysteresis */
1056 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
1057 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
1058 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
1059 QSYS_PAUSE_CFG_PAUSE_START |
1060 QSYS_PAUSE_CFG_PAUSE_STOP |
1061 QSYS_PAUSE_CFG_PAUSE_ENA,
1062 sparx5,
1063 QSYS_PAUSE_CFG(port->portno));
1064
1065 /* Port ATOP. Frames are tail dropped when this WM is hit */
1066 spx5_wr(QSYS_ATOP_ATOP_SET(atop),
1067 sparx5,
1068 QSYS_ATOP(port->portno));
1069
1070 /* Discard pause frame 01-80-C2-00-00-01 */
1071 spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
1072
1073 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
1074 conf->portmode == PHY_INTERFACE_MODE_SGMII) {
1075 err = sparx5_serdes_set(sparx5, port, conf);
1076 if (err)
1077 return err;
1078
1079 if (!sparx5_port_is_2g5(port->portno))
1080 /* Enable shadow device */
1081 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
1082 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
1083 sparx5,
1084 DSM_DEV_TX_STOP_WM_CFG(port->portno));
1085
1086 sparx5_dev_switch(sparx5, port->portno, false);
1087 }
1088 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
1089 // All ports must be PCS enabled in QSGMII mode
1090 spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
1091 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
1092 sparx5,
1093 DEV2G5_DEV_RST_CTRL(port->portno));
1094 }
1095 /* Default IFGs for 1G */
1096 spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
1097 DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
1098 DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
1099 sparx5,
1100 DEV2G5_MAC_IFG_CFG(port->portno));
1101
1102 if (sparx5_port_is_2g5(port->portno))
1103 return 0; /* Low speed device only - return */
1104
1105 /* Now set up the high speed device */
1106 if (conf->portmode == PHY_INTERFACE_MODE_NA)
1107 conf->portmode = PHY_INTERFACE_MODE_10GBASER;
1108
1109 if (sparx5_is_baser(conf->portmode))
1110 sparx5_dev_switch(sparx5, port->portno, true);
1111
1112 /* Set Max Length */
1113 spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1114 DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
1115 devinst,
1116 DEV10G_MAC_MAXLEN_CFG(0));
1117
1118 /* Handle Signal Detect in 10G PCS */
1119 spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
1120 PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
1121 PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
1122 pcsinst,
1123 PCS10G_BR_PCS_SD_CFG(0));
1124
1125 if (sparx5_port_is_25g(port->portno)) {
1126 /* Handle Signal Detect in 25G PCS */
1127 spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
1128 DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
1129 DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
1130 sparx5,
1131 DEV25G_PCS25G_SD_CFG(pix));
1132 }
1133
1134 return 0;
1135}
1136
1137void sparx5_port_enable(struct sparx5_port *port, bool enable)
1138{
1139 struct sparx5 *sparx5 = port->sparx5;
1140
1141 /* Enable or disable the port for frame transfer */
1142 spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
1143 QFWD_SWITCH_PORT_MODE_PORT_ENA,
1144 sparx5,
1145 QFWD_SWITCH_PORT_MODE(port->portno));
1146}