// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/phy/phy.h>
#include <net/dcbnl.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define SPX5_ETYPE_TAG_C 0x8100
#define SPX5_ETYPE_TAG_S 0x88a8

#define SPX5_WAIT_US 1000
#define SPX5_WAIT_MAX_US 2000

enum port_error {
	SPX5_PERR_SPEED,
	SPX5_PERR_IFTYPE,
};

#define PAUSE_DISCARD 0xC
#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)

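/* Decode the SGMII auto-negotiation word advertised by the link partner */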
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
	status->an_complete = true;
	if (!(lp_abil & LPA_SGMII_LINK)) {
		status->link = false;
		return;
	}

	switch (lp_abil & LPA_SGMII_SPD_MASK) {
	case LPA_SGMII_10:
		status->speed = SPEED_10;
		break;
	case LPA_SGMII_100:
		status->speed = SPEED_100;
		break;
	case LPA_SGMII_1000:
		status->speed = SPEED_1000;
		break;
	default:
		status->link = false;
		return;
	}
	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
		status->duplex = DUPLEX_FULL;
	else
		status->duplex = DUPLEX_HALF;
}

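/* Decode the Clause 37 base page: resolve duplex and pause from the local
 * and link partner advertisements.
 */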
static void decode_cl37_word(u16 lp_abil, u16 ld_abil,
			     struct sparx5_port_status *status)
{
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
		DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */

	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_TX : 0;
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_RX : 0;
	} else {
		status->pause = MLO_PAUSE_NONE;
	}
}

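/* Read link, speed, duplex and pause status from the 1G/2.5G device PCS */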
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}

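/* Read link status from the 5G/10G/25G (SFI) device; the MAC TX idle-state
 * sticky bit is used as the link indication.
 */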
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	dev = sparx5_to_high_dev(sparx5, portno);
	tinst = sparx5_port_dev_index(sparx5, portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}

/* Get link status of 1000Base-X/in-band and SFI ports. */
int sparx5_get_port_status(struct sparx5 *sparx5,
			   struct sparx5_port *port,
			   struct sparx5_port_status *status)
{
	memset(status, 0, sizeof(*status));
	status->speed = port->conf.speed;
	if (port->conf.power_down) {
		status->link = false;
		return 0;
	}
	switch (port->conf.portmode) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		return sparx5_get_dev2g5_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		return sparx5_get_sfi_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_NA:
		return 0;
	default:
		netdev_err(port->ndev, "Status not supported\n");
		return -ENODEV;
	}
	return 0;
}

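/* Log a port configuration error and return -EINVAL */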
static int sparx5_port_error(struct sparx5_port *port,
			     struct sparx5_port_config *conf,
			     enum port_error errtype)
{
	switch (errtype) {
	case SPX5_PERR_SPEED:
		netdev_err(port->ndev,
			   "Interface does not support speed: %u: for %s\n",
			   conf->speed, phy_modes(conf->portmode));
		break;
	case SPX5_PERR_IFTYPE:
		netdev_err(port->ndev,
			   "Switch port does not support interface type: %s\n",
			   phy_modes(conf->portmode));
		break;
	default:
		netdev_err(port->ndev,
			   "Interface configuration error\n");
	}

	return -EINVAL;
}

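/* Check that the requested speed is valid for the port device and the
 * selected interface mode.
 */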
static int sparx5_port_verify_speed(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	const struct sparx5_ops *ops = sparx5->data->ops;

	if ((ops->is_port_2g5(port->portno) &&
	     conf->speed > SPEED_2500) ||
	    (ops->is_port_5g(port->portno) &&
	     conf->speed > SPEED_5000) ||
	    (ops->is_port_10g(port->portno) &&
	     conf->speed > SPEED_10000))
		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_NA:
		return -EINVAL;
	case PHY_INTERFACE_MODE_1000BASEX:
		if (conf->speed != SPEED_1000 ||
		    ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		if (ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		if (conf->speed != SPEED_2500 ||
		    ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_QSGMII:
		if (port->portno > 47)
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		if (conf->speed != SPEED_1000 &&
		    conf->speed != SPEED_100 &&
		    conf->speed != SPEED_10 &&
		    conf->speed != SPEED_2500)
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		if ((conf->speed != SPEED_5000 &&
		     conf->speed != SPEED_10000 &&
		     conf->speed != SPEED_25000))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	default:
		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
	}
	return 0;
}

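/* Return true if the new configuration moves the port between the 1G/2.5G
 * device and the 5G/10G/25G (BaseR) device.
 */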
static bool sparx5_dev_change(struct sparx5 *sparx5,
			      struct sparx5_port *port,
			      struct sparx5_port_config *conf)
{
	return sparx5_is_baser(port->conf.portmode) ^
	       sparx5_is_baser(conf->portmode);
}

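/* Poll the queue system until no memory or frame references remain for the
 * port, or time out.
 */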
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32 value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}

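/* Stop, flush and reset a port, following the numbered steps below */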
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(sparx5, port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25G BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (ops->is_port_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled */
	return 0;
}

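/* Calculate the DSM TX stop watermark for a port based on its speed, the
 * core clock period and the port's taxi bus distance.
 */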
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width = 8;
	u32 addition = 0;

	if (!is_sparx5(sparx5))
		return 0;

	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per = 8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return (tmp3 + 2000 + 999) / 1000 + addition;
}

/* Configure port muxing:
 * QSGMII: 4x2G5 devices
 */
int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	u32 portno = port->portno;
	u32 inst;

	if (port->conf.portmode == conf->portmode)
		return 0; /* Nothing to do */

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
		inst = (portno - portno % 4) / 4;
		spx5_rmw(BIT(inst),
			 BIT(inst),
			 sparx5,
			 PORT_CONF_QSGMII_ENA);

		if ((portno / 4 % 2) == 0) {
			/* Affects d0-d3,d8-d11..d40-d43 */
			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
				 PORT_CONF_USGMII_CFG_QUAD_MODE,
				 sparx5,
				 PORT_CONF_USGMII_CFG((portno / 8)));
		}
		break;
	default:
		break;
	}
	return 0;
}

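/* Configure VLAN tag awareness and the accepted tag count in the low and
 * high speed MACs.
 */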
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
	int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
		     max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type = port->vlan_type;
	bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
	u32 tinst = sparx5_port_dev_index(sparx5, port->portno);
	void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 etype;

	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}

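/* Calculate the queue system forwarding urgency (in core clock cycles) for
 * the given port speed.
 */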
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
	u32 clk_period_ps = 1600; /* 625 MHz for now */
	u32 urg = 672000;

	switch (speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		urg = 672000;
		break;
	case SPEED_2500:
		urg = 270000;
		break;
	case SPEED_5000:
		urg = 135000;
		break;
	case SPEED_10000:
		urg = 67200;
		break;
	case SPEED_25000:
		urg = 27000;
		break;
	}
	return urg / clk_period_ps - 1;
}

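/* Encode a watermark value; values of 2048 and above are stored in units of 16 */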
static u16 sparx5_wm_enc(u16 value)
{
	if (value >= 2048)
		return 2048 + value / 16;

	return value;
}

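/* Configure flow control: obey received pause frames and/or generate pause
 * frames based on the buffer fill level.
 */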
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
						SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}

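/* Build the advertisement word used for in-band auto-negotiation */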
static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
{
	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
	else
		return 1; /* Enable SGMII Aneg */
}

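/* Configure the SerDes with media, speed and interface mode for the port */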
int sparx5_serdes_set(struct sparx5 *sparx5,
		      struct sparx5_port *port,
		      struct sparx5_port_config *conf)
{
	int portmode, err, speed = conf->speed;

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
	    ((port->portno % 4) != 0)) {
		return 0;
	}
	if (sparx5_is_baser(conf->portmode)) {
		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
			speed = SPEED_25000;
		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
			speed = SPEED_10000;
		else
			speed = SPEED_5000;
	}

	err = phy_set_media(port->serdes, conf->media);
	if (err)
		return err;
	if (speed > 0) {
		err = phy_set_speed(port->serdes, speed);
		if (err)
			return err;
	}
	if (conf->serdes_reset) {
		err = phy_reset(port->serdes);
		if (err)
			return err;
	}

	/* Configure SerDes with port parameters
	 * For BaseR, the serdes driver supports 10GBASE-R and speed 5G/10G/25G
	 */
	portmode = conf->portmode;
	if (sparx5_is_baser(conf->portmode))
		portmode = PHY_INTERFACE_MODE_10GBASER;
	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
	if (err)
		return err;
	conf->serdes_reset = false;
	return err;
}

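/* Configure the 1G PCS: SGMII or 1000BaseX/2500BaseX mode with optional
 * in-band auto-negotiation.
 */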
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
{
	bool sgmii = false, inband_aneg = false;
	int err;

	if (conf->inband) {
		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
			 conf->autoneg)
			inband_aneg = true; /* Clause-37 in-band-aneg */

		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return -EINVAL;
	} else {
		sgmii = true; /* Phy is connected to the MAC */
	}

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 sparx5,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		sparx5,
		DEV2G5_PCS1G_CFG(port->portno));

	if (inband_aneg) {
		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			sparx5,
			DEV2G5_PCS1G_ANEG_CFG(port->portno));
	} else {
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
	}

	/* Take PCS out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}

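/* Configure the 5G/10G/25G BaseR PCS and MAC and take the device out of reset */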
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* SFI : No in-band-aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (err)
		return -EINVAL;
	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 sparx5,
			 DEV25G_PCS25G_CFG(pix));
	} else {
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));
	}

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     devinst,
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      devinst,
		      DEV10G_DEV_RST_CTRL(0));

	return 0;
}

/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	int bt_indx;

	bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));

	if (ops->is_port_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV5G_MODES);
	} else if (ops->is_port_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV10G_MODES);
	} else if (ops->is_port_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV25G_MODES);
	}
}

/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	/* Enable PHAD_CTRL for better timestamping */
	if (!is_sparx5(sparx5)) {
		for (int i = 0; i < 2; ++i) {
			/* Divide the port clock by three for the two
			 * phase detection registers.
			 */
			spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
				 DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
				 DEV2G5_PHAD_CTRL_DIV_CFG |
				 DEV2G5_PHAD_CTRL_PHAD_ENA,
				 sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
		}
	}

	return 0;
}

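/* Disable the port, switch to the required device and configure its PCS */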
int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err;

	if (sparx5_dev_change(sparx5, port, conf)) {
		/* switch device */
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
		if (err)
			return err;
	}
	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);
	if (err)
		return -EINVAL;

	if (high_speed_dev)
		err = sparx5_port_pcs_high_set(sparx5, port, conf);
	else
		err = sparx5_port_pcs_low_set(sparx5, port, conf);

	if (err)
		return -EINVAL;

	if (conf->inband) {
		/* Enable/disable 1G counters in ASM */
		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 ASM_PORT_CFG_CSC_STAT_DIS,
			 sparx5,
			 ASM_PORT_CFG(port->portno));

		/* Enable/disable 1G counters in DSM */
		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 DSM_BUF_CFG_CSC_STAT_DIS,
			 sparx5,
			 DSM_BUF_CFG(port->portno));
	}

	port->conf = *conf;

	return 0;
}

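/* Apply the port configuration: speed/duplex dependent registers, flow
 * control, DSM stop watermark and forwarding urgency.
 */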
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	const struct sparx5_ops *ops = sparx5->data->ops;
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
	    conf->speed < SPEED_10000)
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}

/* Initialize port config to default */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode */
	err = ops->set_port_mux(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!ops->is_port_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		/* All ports must be PCS enabled in QSGMII mode */
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_MAXLEN_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (ops->is_port_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	if (!is_sparx5(sparx5)) {
		void __iomem *inst;
		u32 dev, tinst;

		if (ops->is_port_10g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV10G_PTP_STAMPER_CFG(port->portno));
		} else if (ops->is_port_5g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV5G_PTP_STAMPER_CFG(port->portno));
		}
	}

	return 0;
}

void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Enable port for frame transfer? */
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}

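/* Apply all QoS settings for a port: DSCP, PCP, rewrite maps and defaults */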
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}

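/* Configure egress PCP/DEI rewriting from the classified QoS class and DP level */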
int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
				 struct sparx5_port_qos_pcp_rewr *qos)
{
	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
	struct sparx5 *sparx5 = port->sparx5;
	u8 pcp, dei;

	/* Use mapping table, with classified QoS as index, to map QoS and DP
	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
	 * PCP. Classified PCP equals frame PCP.
	 */
	if (qos->enable)
		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
		 port->sparx5, REW_TAG_CTRL(port->portno));

	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		/* Extract PCP and DEI */
		pcp = qos->map.map[i];
		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
			dei = 1;
		else
			dei = 0;

		/* Rewrite PCP and DEI, for each classified QoS class and DP
		 * level. This table is only used if tag ctrl mode is set to
		 * 'mapped'.
		 *
		 * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
		 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
		 */
		if (dei) {
			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
				 REW_PCP_MAP_DE1(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
				 REW_DEI_MAP_DE1(port->portno, i));
		} else {
			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
				 REW_PCP_MAP_DE0(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
				 REW_DEI_MAP_DE0(port->portno, i));
		}
	}

	return 0;
}

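/* Configure ingress PCP/DEI to QoS class and DP level mapping */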
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
			    struct sparx5_port_qos_pcp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *pcp_itr = qos->map.map;
	u8 pcp, dp;
	int i;

	/* Enable/disable pcp and dp for qos classification. */
	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Map each pcp and dei value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		pcp = *(pcp_itr + i);
		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
	}

	return 0;
}

void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
					int mode)
{
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
}

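/* Configure egress DSCP rewriting and the global QoS class to DSCP map */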
int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
				  struct sparx5_port_qos_dscp_rewr *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	bool rewr = false;
	u16 dscp;
	int i;

	/* On egress, rewrite DSCP value to either classified DSCP or frame
	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
	 */
	if (qos->enable)
		rewr = true;

	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
		 REW_DSCP_MAP(port->portno));

	/* On ingress, map each classified QoS class and DP to classified DSCP
	 * value. This mapping table is global for all ports.
	 */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		dscp = qos->map.map[i];
		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
			 ANA_CL_QOS_MAP_CFG(i));
	}

	return 0;
}

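/* Configure ingress DSCP to QoS class mapping and per-DSCP trust */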
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
			     struct sparx5_port_qos_dscp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *dscp = qos->map.map;
	int i;

	/* Enable/disable dscp and dp for qos classification.
	 * Disable rewrite of dscp values for now.
	 */
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
		 ANA_CL_QOS_CFG(port->portno));

	/* Map each dscp value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
			 ANA_CL_DSCP_CFG(i));
	}

	/* Set per-dscp trust */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		if (qos->qos_enable) {
			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
				 ANA_CL_DSCP_CFG(i));
		}
	}

	return 0;
}

int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));

	return 0;
}

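/* Internal ports are numbered after the last front port */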
int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
{
	return sparx5->data->consts->n_ports + port;
}
1// SPDX-License-Identifier: GPL-2.0+
2/* Microchip Sparx5 Switch driver
3 *
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5 */
6
7#include <linux/module.h>
8#include <linux/phy/phy.h>
9
10#include "sparx5_main_regs.h"
11#include "sparx5_main.h"
12#include "sparx5_port.h"
13
14#define SPX5_ETYPE_TAG_C 0x8100
15#define SPX5_ETYPE_TAG_S 0x88a8
16
17#define SPX5_WAIT_US 1000
18#define SPX5_WAIT_MAX_US 2000
19
20enum port_error {
21 SPX5_PERR_SPEED,
22 SPX5_PERR_IFTYPE,
23};
24
25#define PAUSE_DISCARD 0xC
26#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
27
28static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
29{
30 status->an_complete = true;
31 if (!(lp_abil & LPA_SGMII_LINK)) {
32 status->link = false;
33 return;
34 }
35
36 switch (lp_abil & LPA_SGMII_SPD_MASK) {
37 case LPA_SGMII_10:
38 status->speed = SPEED_10;
39 break;
40 case LPA_SGMII_100:
41 status->speed = SPEED_100;
42 break;
43 case LPA_SGMII_1000:
44 status->speed = SPEED_1000;
45 break;
46 default:
47 status->link = false;
48 return;
49 }
50 if (lp_abil & LPA_SGMII_FULL_DUPLEX)
51 status->duplex = DUPLEX_FULL;
52 else
53 status->duplex = DUPLEX_HALF;
54}
55
56static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
57{
58 status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
59 status->an_complete = true;
60 status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
61 DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported
62
63 if ((ld_abil & ADVERTISE_1000XPAUSE) &&
64 (lp_abil & ADVERTISE_1000XPAUSE)) {
65 status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
66 } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
67 (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
68 status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
69 MLO_PAUSE_TX : 0;
70 status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
71 MLO_PAUSE_RX : 0;
72 } else {
73 status->pause = MLO_PAUSE_NONE;
74 }
75}
76
77static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
78 struct sparx5_port *port,
79 struct sparx5_port_status *status)
80{
81 u32 portno = port->portno;
82 u16 lp_adv, ld_adv;
83 u32 value;
84
85 /* Get PCS Link down sticky */
86 value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
87 status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
88 if (status->link_down) /* Clear the sticky */
89 spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
90
91 /* Get both current Link and Sync status */
92 value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
93 status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
94 DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
95
96 if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
97 status->speed = SPEED_1000;
98 else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
99 status->speed = SPEED_2500;
100
101 status->duplex = DUPLEX_FULL;
102
103 /* Get PCS ANEG status register */
104 value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
105
106 /* Aneg complete provides more information */
107 if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
108 lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
109 if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
110 decode_sgmii_word(lp_adv, status);
111 } else {
112 value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
113 ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
114 decode_cl37_word(lp_adv, ld_adv, status);
115 }
116 }
117 return 0;
118}
119
120static int sparx5_get_sfi_status(struct sparx5 *sparx5,
121 struct sparx5_port *port,
122 struct sparx5_port_status *status)
123{
124 bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
125 u32 portno = port->portno;
126 u32 value, dev, tinst;
127 void __iomem *inst;
128
129 if (!high_speed_dev) {
130 netdev_err(port->ndev, "error: low speed and SFI mode\n");
131 return -EINVAL;
132 }
133
134 dev = sparx5_to_high_dev(portno);
135 tinst = sparx5_port_dev_index(portno);
136 inst = spx5_inst_get(sparx5, dev, tinst);
137
138 value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
139 if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
140 /* The link is or has been down. Clear the sticky bit */
141 status->link_down = 1;
142 spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
143 value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
144 }
145 status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
146 status->duplex = DUPLEX_FULL;
147 if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
148 status->speed = SPEED_5000;
149 else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
150 status->speed = SPEED_10000;
151 else
152 status->speed = SPEED_25000;
153
154 return 0;
155}
156
157/* Get link status of 1000Base-X/in-band and SFI ports.
158 */
159int sparx5_get_port_status(struct sparx5 *sparx5,
160 struct sparx5_port *port,
161 struct sparx5_port_status *status)
162{
163 memset(status, 0, sizeof(*status));
164 status->speed = port->conf.speed;
165 if (port->conf.power_down) {
166 status->link = false;
167 return 0;
168 }
169 switch (port->conf.portmode) {
170 case PHY_INTERFACE_MODE_SGMII:
171 case PHY_INTERFACE_MODE_QSGMII:
172 case PHY_INTERFACE_MODE_1000BASEX:
173 case PHY_INTERFACE_MODE_2500BASEX:
174 return sparx5_get_dev2g5_status(sparx5, port, status);
175 case PHY_INTERFACE_MODE_5GBASER:
176 case PHY_INTERFACE_MODE_10GBASER:
177 case PHY_INTERFACE_MODE_25GBASER:
178 return sparx5_get_sfi_status(sparx5, port, status);
179 case PHY_INTERFACE_MODE_NA:
180 return 0;
181 default:
182 netdev_err(port->ndev, "Status not supported");
183 return -ENODEV;
184 }
185 return 0;
186}
187
188static int sparx5_port_error(struct sparx5_port *port,
189 struct sparx5_port_config *conf,
190 enum port_error errtype)
191{
192 switch (errtype) {
193 case SPX5_PERR_SPEED:
194 netdev_err(port->ndev,
195 "Interface does not support speed: %u: for %s\n",
196 conf->speed, phy_modes(conf->portmode));
197 break;
198 case SPX5_PERR_IFTYPE:
199 netdev_err(port->ndev,
200 "Switch port does not support interface type: %s\n",
201 phy_modes(conf->portmode));
202 break;
203 default:
204 netdev_err(port->ndev,
205 "Interface configuration error\n");
206 }
207
208 return -EINVAL;
209}
210
211static int sparx5_port_verify_speed(struct sparx5 *sparx5,
212 struct sparx5_port *port,
213 struct sparx5_port_config *conf)
214{
215 if ((sparx5_port_is_2g5(port->portno) &&
216 conf->speed > SPEED_2500) ||
217 (sparx5_port_is_5g(port->portno) &&
218 conf->speed > SPEED_5000) ||
219 (sparx5_port_is_10g(port->portno) &&
220 conf->speed > SPEED_10000))
221 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
222
223 switch (conf->portmode) {
224 case PHY_INTERFACE_MODE_NA:
225 return -EINVAL;
226 case PHY_INTERFACE_MODE_1000BASEX:
227 if (conf->speed != SPEED_1000 ||
228 sparx5_port_is_2g5(port->portno))
229 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
230 if (sparx5_port_is_2g5(port->portno))
231 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
232 break;
233 case PHY_INTERFACE_MODE_2500BASEX:
234 if (conf->speed != SPEED_2500 ||
235 sparx5_port_is_2g5(port->portno))
236 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
237 break;
238 case PHY_INTERFACE_MODE_QSGMII:
239 if (port->portno > 47)
240 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
241 fallthrough;
242 case PHY_INTERFACE_MODE_SGMII:
243 if (conf->speed != SPEED_1000 &&
244 conf->speed != SPEED_100 &&
245 conf->speed != SPEED_10 &&
246 conf->speed != SPEED_2500)
247 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
248 break;
249 case PHY_INTERFACE_MODE_5GBASER:
250 case PHY_INTERFACE_MODE_10GBASER:
251 case PHY_INTERFACE_MODE_25GBASER:
252 if ((conf->speed != SPEED_5000 &&
253 conf->speed != SPEED_10000 &&
254 conf->speed != SPEED_25000))
255 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
256 break;
257 default:
258 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
259 }
260 return 0;
261}
262
263static bool sparx5_dev_change(struct sparx5 *sparx5,
264 struct sparx5_port *port,
265 struct sparx5_port_config *conf)
266{
267 return sparx5_is_baser(port->conf.portmode) ^
268 sparx5_is_baser(conf->portmode);
269}
270
271static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
272{
273 u32 value, resource, prio, delay_cnt = 0;
274 bool poll_src = true;
275 char *mem = "";
276
277 /* Resource == 0: Memory tracked per source (SRC-MEM)
278 * Resource == 1: Frame references tracked per source (SRC-REF)
279 * Resource == 2: Memory tracked per destination (DST-MEM)
280 * Resource == 3: Frame references tracked per destination. (DST-REF)
281 */
282 while (1) {
283 bool empty = true;
284
285 for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
286 u32 base;
287
288 base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
289 for (prio = 0; prio < SPX5_PRIOS; prio++) {
290 value = spx5_rd(sparx5,
291 QRES_RES_STAT(base + prio));
292 if (value) {
293 mem = resource == 0 ?
294 "DST-MEM" : "SRC-MEM";
295 empty = false;
296 }
297 }
298 }
299
300 if (empty)
301 break;
302
303 if (delay_cnt++ == 2000) {
304 dev_err(sparx5->dev,
305 "Flush timeout port %u. %s queue not empty\n",
306 portno, mem);
307 return -EINVAL;
308 }
309
310 usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
311 }
312 return 0;
313}
314
315static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
316{
317 u32 tinst = high_spd_dev ?
318 sparx5_port_dev_index(port->portno) : port->portno;
319 u32 dev = high_spd_dev ?
320 sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
321 void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
322 u32 spd = port->conf.speed;
323 u32 spd_prm;
324 int err;
325
326 if (high_spd_dev) {
327 /* 1: Reset the PCS Rx clock domain */
328 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
329 DEV10G_DEV_RST_CTRL_PCS_RX_RST,
330 devinst,
331 DEV10G_DEV_RST_CTRL(0));
332
333 /* 2: Disable MAC frame reception */
334 spx5_inst_rmw(0,
335 DEV10G_MAC_ENA_CFG_RX_ENA,
336 devinst,
337 DEV10G_MAC_ENA_CFG(0));
338 } else {
339 /* 1: Reset the PCS Rx clock domain */
340 spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
341 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
342 devinst,
343 DEV2G5_DEV_RST_CTRL(0));
344 /* 2: Disable MAC frame reception */
345 spx5_inst_rmw(0,
346 DEV2G5_MAC_ENA_CFG_RX_ENA,
347 devinst,
348 DEV2G5_MAC_ENA_CFG(0));
349 }
350 /* 3: Disable traffic being sent to or from switch port->portno */
351 spx5_rmw(0,
352 QFWD_SWITCH_PORT_MODE_PORT_ENA,
353 sparx5,
354 QFWD_SWITCH_PORT_MODE(port->portno));
355
356 /* 4: Disable dequeuing from the egress queues */
357 spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
358 HSCH_PORT_MODE_DEQUEUE_DIS,
359 sparx5,
360 HSCH_PORT_MODE(port->portno));
361
362 /* 5: Disable Flowcontrol */
363 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
364 QSYS_PAUSE_CFG_PAUSE_STOP,
365 sparx5,
366 QSYS_PAUSE_CFG(port->portno));
367
368 spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
369 /* 6: Wait while the last frame is exiting the queues */
370 usleep_range(8 * spd_prm, 10 * spd_prm);
371
372 /* 7: Flush the queues accociated with the port->portno */
373 spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
374 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
375 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
376 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
377 HSCH_FLUSH_CTRL_FLUSH_PORT |
378 HSCH_FLUSH_CTRL_FLUSH_DST |
379 HSCH_FLUSH_CTRL_FLUSH_SRC |
380 HSCH_FLUSH_CTRL_FLUSH_ENA,
381 sparx5,
382 HSCH_FLUSH_CTRL);
383
384 /* 8: Enable dequeuing from the egress queues */
385 spx5_rmw(0,
386 HSCH_PORT_MODE_DEQUEUE_DIS,
387 sparx5,
388 HSCH_PORT_MODE(port->portno));
389
390 /* 9: Wait until flushing is complete */
391 err = sparx5_port_flush_poll(sparx5, port->portno);
392 if (err)
393 return err;
394
395 /* 10: Reset the MAC clock domain */
396 if (high_spd_dev) {
397 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
398 DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
399 DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
400 DEV10G_DEV_RST_CTRL_PCS_TX_RST |
401 DEV10G_DEV_RST_CTRL_MAC_RX_RST |
402 DEV10G_DEV_RST_CTRL_MAC_TX_RST,
403 devinst,
404 DEV10G_DEV_RST_CTRL(0));
405
406 } else {
407 spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
408 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
409 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
410 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
411 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
412 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
413 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
414 DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
415 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
416 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
417 devinst,
418 DEV2G5_DEV_RST_CTRL(0));
419 }
420 /* 11: Clear flushing */
421 spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
422 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
423 HSCH_FLUSH_CTRL_FLUSH_PORT |
424 HSCH_FLUSH_CTRL_FLUSH_ENA,
425 sparx5,
426 HSCH_FLUSH_CTRL);
427
428 if (high_spd_dev) {
429 u32 pcs = sparx5_to_pcs_dev(port->portno);
430 void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
431
432 /* 12: Disable 5G/10G/25 BaseR PCS */
433 spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
434 PCS10G_BR_PCS_CFG_PCS_ENA,
435 pcsinst,
436 PCS10G_BR_PCS_CFG(0));
437
438 if (sparx5_port_is_25g(port->portno))
439 /* Disable 25G PCS */
440 spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
441 DEV25G_PCS25G_CFG_PCS25G_ENA,
442 sparx5,
443 DEV25G_PCS25G_CFG(tinst));
444 } else {
445 /* 12: Disable 1G PCS */
446 spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
447 DEV2G5_PCS1G_CFG_PCS_ENA,
448 sparx5,
449 DEV2G5_PCS1G_CFG(port->portno));
450 }
451
452 /* The port is now flushed and disabled */
453 return 0;
454}
455
456static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
457 u32 portno, u32 speed)
458{
459 u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
460 const u32 taxi_dist[SPX5_PORTS_ALL] = {
461 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
462 4, 4, 4, 4,
463 11, 12, 13, 14, 15, 16, 17, 18,
464 11, 12, 13, 14, 15, 16, 17, 18,
465 11, 12, 13, 14, 15, 16, 17, 18,
466 11, 12, 13, 14, 15, 16, 17, 18,
467 4, 6, 8, 4, 6, 8, 6, 8,
468 2, 2, 2, 2, 2, 2, 2, 4, 2
469 };
470 u32 mac_per = 6400, tmp1, tmp2, tmp3;
471 u32 fifo_width = 16;
472 u32 mac_width = 8;
473 u32 addition = 0;
474
475 switch (speed) {
476 case SPEED_25000:
477 return 0;
478 case SPEED_10000:
479 mac_per = 6400;
480 mac_width = 8;
481 addition = 1;
482 break;
483 case SPEED_5000:
484 mac_per = 12800;
485 mac_width = 8;
486 addition = 0;
487 break;
488 case SPEED_2500:
489 mac_per = 3200;
490 mac_width = 1;
491 addition = 0;
492 break;
493 case SPEED_1000:
494 mac_per = 8000;
495 mac_width = 1;
496 addition = 0;
497 break;
498 case SPEED_100:
499 case SPEED_10:
500 return 1;
501 default:
502 break;
503 }
504
505 tmp1 = 1000 * mac_width / fifo_width;
506 tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
507 * sys_clk / mac_per);
508 tmp3 = tmp1 * tmp2 / 1000;
509 return (tmp3 + 2000 + 999) / 1000 + addition;
510}
511
512/* Configure port muxing:
513 * QSGMII: 4x2G5 devices
514 */
515static int sparx5_port_mux_set(struct sparx5 *sparx5,
516 struct sparx5_port *port,
517 struct sparx5_port_config *conf)
518{
519 u32 portno = port->portno;
520 u32 inst;
521
522 if (port->conf.portmode == conf->portmode)
523 return 0; /* Nothing to do */
524
525 switch (conf->portmode) {
526 case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
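	/* Each QSGMII group covers four consecutive ports; inst is the
	 * group index, used as a bit position in PORT_CONF_QSGMII_ENA.
	 */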
527 inst = (portno - portno % 4) / 4;
528 spx5_rmw(BIT(inst),
529 BIT(inst),
530 sparx5,
531 PORT_CONF_QSGMII_ENA);
532
533 if ((portno / 4 % 2) == 0) {
534 /* Affects d0-d3,d8-d11..d40-d43 */
535 spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
536 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
537 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
538 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
539 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
540 PORT_CONF_USGMII_CFG_QUAD_MODE,
541 sparx5,
542 PORT_CONF_USGMII_CFG((portno / 8)));
543 }
544 break;
545 default:
546 break;
547 }
548 return 0;
549}
550
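/* Configure VLAN tag awareness for the port: select the tag EtherType from
 * the port VLAN type and program single/double tag awareness in the 2G5
 * device and, for ports that also have a 5G/10G/25G device, in that device
 * as well.
 */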
551static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
552 struct sparx5_port *port)
553{
554 enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
555 int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
556 max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
557 bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
558 enum sparx5_vlan_port_type vlan_type = port->vlan_type;
559 bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
560 u32 dev = sparx5_to_high_dev(port->portno);
561 u32 tinst = sparx5_port_dev_index(port->portno);
562 void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
563 u32 etype;
564
565 etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
566 port->custom_etype :
567 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
568 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
569
570 spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
571 DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
572 DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
573 DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
574 sparx5,
575 DEV2G5_MAC_TAGS_CFG(port->portno));
576
577 if (sparx5_port_is_2g5(port->portno))
578 return 0;
579
580 spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
581 DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
582 DEV10G_MAC_TAGS_CFG_TAG_ID |
583 DEV10G_MAC_TAGS_CFG_TAG_ENA,
584 inst,
585 DEV10G_MAC_TAGS_CFG(0, 0));
586
587 spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
588 DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
589 inst,
590 DEV10G_MAC_NUM_TAGS_CFG(0));
591
592 spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
593 DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
594 inst,
595 DEV10G_MAC_MAXLEN_CFG(0));
596 return 0;
597}
598
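/* Compute the forwarding urgency (QFWD_SWITCH_PORT_MODE_FWD_URGENCY) for a
 * port speed, expressed in core clock cycles. The urg constants are roughly
 * the wire time in ps of a minimum sized frame including preamble and IFG
 * (84 bytes) at each speed; with the 1600 ps (625 MHz) clock period used
 * below, 1G for instance gives 672000 / 1600 - 1 = 419.
 */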
599static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
600{
601 u32 clk_period_ps = 1600; /* 625 MHz for now */
602 u32 urg = 672000;
603
604 switch (speed) {
605 case SPEED_10:
606 case SPEED_100:
607 case SPEED_1000:
608 urg = 672000;
609 break;
610 case SPEED_2500:
611 urg = 270000;
612 break;
613 case SPEED_5000:
614 urg = 135000;
615 break;
616 case SPEED_10000:
617 urg = 67200;
618 break;
619 case SPEED_25000:
620 urg = 27000;
621 break;
622 }
623 return urg / clk_period_ps - 1;
624}
625
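/* Encode a watermark value for the QSYS watermark fields (pause start/stop
 * and ATOP): values below 2048 are used as-is, larger values are encoded as
 * 2048 + value / 16. Example: sparx5_wm_enc(4096) = 2048 + 256 = 2304.
 */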
626static u16 sparx5_wm_enc(u16 value)
627{
628 if (value >= 2048)
629 return 2048 + value / 16;
630
631 return value;
632}
633
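/* Configure flow control for the port: half duplex backpressure, whether
 * received pause frames are obeyed, forward pressure (disabled when pause
 * RX is enabled) and the stop watermark used for pause frame generation
 * (generation stays disabled unless TX pause is requested).
 */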
634static int sparx5_port_fc_setup(struct sparx5 *sparx5,
635 struct sparx5_port *port,
636 struct sparx5_port_config *conf)
637{
638 bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
639 u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
640
641 if (conf->pause & MLO_PAUSE_TX)
642 pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
643 SPX5_BUFFER_CELL_SZ));
644
645 /* Set HDX flow control */
646 spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
647 DSM_MAC_CFG_HDX_BACKPREASSURE,
648 sparx5,
649 DSM_MAC_CFG(port->portno));
650
651 /* Obey flow control */
652 spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
653 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
654 sparx5,
655 DSM_RX_PAUSE_CFG(port->portno));
656
657 /* Disable forward pressure */
658 spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
659 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
660 sparx5,
661 QSYS_FWD_PRESSURE(port->portno));
662
663 /* Generate pause frames */
664 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
665 QSYS_PAUSE_CFG_PAUSE_STOP,
666 sparx5,
667 QSYS_PAUSE_CFG(port->portno));
668
669 return 0;
670}
671
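/* Build the in-band aneg advertisement word: for 1000BASE-X this is the
 * clause 37 word (full duplex, the configured pause bits and the ack bit);
 * for (Q)SGMII only bit 0 is set to enable aneg.
 */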
672static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
673{
674 if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
675 return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
676 else
677 return 1; /* Enable SGMII Aneg */
678}
679
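/* Configure the port SerDes through the generic PHY API: media, speed
 * (fixed per BaseR mode), an optional reset and finally the interface mode.
 * All BaseR modes are mapped to PHY_INTERFACE_MODE_10GBASER, which is what
 * the SerDes driver supports (see the comment in the body below).
 */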
680int sparx5_serdes_set(struct sparx5 *sparx5,
681 struct sparx5_port *port,
682 struct sparx5_port_config *conf)
683{
684 int portmode, err, speed = conf->speed;
685
686 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
687 ((port->portno % 4) != 0)) {
688 return 0;
689 }
690 if (sparx5_is_baser(conf->portmode)) {
691 if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
692 speed = SPEED_25000;
693 else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
694 speed = SPEED_10000;
695 else
696 speed = SPEED_5000;
697 }
698
699 err = phy_set_media(port->serdes, conf->media);
700 if (err)
701 return err;
702 if (speed > 0) {
703 err = phy_set_speed(port->serdes, speed);
704 if (err)
705 return err;
706 }
707 if (conf->serdes_reset) {
708 err = phy_reset(port->serdes);
709 if (err)
710 return err;
711 }
712
713 /* Configure SerDes with port parameters
714 * For BaseR, the serdes driver supports 10GBASE-R and speeds of 5G/10G/25G
715 */
716 portmode = conf->portmode;
717 if (sparx5_is_baser(conf->portmode))
718 portmode = PHY_INTERFACE_MODE_10GBASER;
719 err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
720 if (err)
721 return err;
722 conf->serdes_reset = false;
723 return err;
724}
725
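/* Set up the 1G/2.5G PCS: choose SGMII or 1000BASE-X PCS mode, configure the
 * SerDes and optional in-band aneg when in-band signalling is used, and take
 * the PCS out of reset.
 */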
726static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
727 struct sparx5_port *port,
728 struct sparx5_port_config *conf)
729{
730 bool sgmii = false, inband_aneg = false;
731 int err;
732
733 if (port->conf.inband) {
734 if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
735 conf->portmode == PHY_INTERFACE_MODE_QSGMII)
736 inband_aneg = true; /* Cisco-SGMII in-band-aneg */
737 else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
738 conf->autoneg)
739 inband_aneg = true; /* Clause-37 in-band-aneg */
740
741 err = sparx5_serdes_set(sparx5, port, conf);
742 if (err)
743 return -EINVAL;
744 } else {
745 sgmii = true; /* PHY is connected to the MAC */
746 }
747
748 /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
749 spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
750 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
751 sparx5,
752 DEV2G5_PCS1G_MODE_CFG(port->portno));
753
754 /* Enable PCS */
755 spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
756 sparx5,
757 DEV2G5_PCS1G_CFG(port->portno));
758
759 if (inband_aneg) {
760 u16 abil = sparx5_get_aneg_word(conf);
761
762 /* Enable in-band aneg */
763 spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
764 DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
765 DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
766 DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
767 sparx5,
768 DEV2G5_PCS1G_ANEG_CFG(port->portno));
769 } else {
770 spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
771 }
772
773 /* Take PCS out of reset */
774 spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
775 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
776 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
777 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
778 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
779 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
780 sparx5,
781 DEV2G5_DEV_RST_CTRL(port->portno));
782
783 return 0;
784}
785
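/* Set up the 5G/10G/25G (BaseR) PCS and MAC: no in-band aneg here; enable
 * the relevant PCS, enable the MAC and take the device out of reset with the
 * proper speed selection.
 */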
786static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
787 struct sparx5_port *port,
788 struct sparx5_port_config *conf)
789{
790 u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
791 u32 pix = sparx5_port_dev_index(port->portno);
792 u32 dev = sparx5_to_high_dev(port->portno);
793 u32 pcs = sparx5_to_pcs_dev(port->portno);
794 void __iomem *devinst;
795 void __iomem *pcsinst;
796 int err;
797
798 devinst = spx5_inst_get(sparx5, dev, pix);
799 pcsinst = spx5_inst_get(sparx5, pcs, pix);
800
801 /* SFI : No in-band-aneg. Speeds 5G/10G/25G */
802 err = sparx5_serdes_set(sparx5, port, conf);
803 if (err)
804 return -EINVAL;
805 if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
806 /* Enable PCS for 25G device, speed 25G */
807 spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
808 DEV25G_PCS25G_CFG_PCS25G_ENA,
809 sparx5,
810 DEV25G_PCS25G_CFG(pix));
811 } else {
812 /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
813 spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
814 PCS10G_BR_PCS_CFG_PCS_ENA,
815 pcsinst,
816 PCS10G_BR_PCS_CFG(0));
817 }
818
819 /* Enable 5G/10G/25G MAC module */
820 spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
821 DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
822 devinst,
823 DEV10G_MAC_ENA_CFG(0));
824
825 /* Take the device out of reset */
826 spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
827 DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
828 DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
829 DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
830 DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
831 DEV10G_DEV_RST_CTRL_PCS_RX_RST |
832 DEV10G_DEV_RST_CTRL_PCS_TX_RST |
833 DEV10G_DEV_RST_CTRL_MAC_RX_RST |
834 DEV10G_DEV_RST_CTRL_MAC_TX_RST |
835 DEV10G_DEV_RST_CTRL_SPEED_SEL,
836 devinst,
837 DEV10G_DEV_RST_CTRL(0));
838
839 return 0;
840}
841
842 /* Switch between the 1G/2.5G and 5G/10G/25G devices */
843static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
844{
845 int bt_indx = BIT(sparx5_port_dev_index(port));
846
847 if (sparx5_port_is_5g(port)) {
848 spx5_rmw(hsd ? 0 : bt_indx,
849 bt_indx,
850 sparx5,
851 PORT_CONF_DEV5G_MODES);
852 } else if (sparx5_port_is_10g(port)) {
853 spx5_rmw(hsd ? 0 : bt_indx,
854 bt_indx,
855 sparx5,
856 PORT_CONF_DEV10G_MODES);
857 } else if (sparx5_port_is_25g(port)) {
858 spx5_rmw(hsd ? 0 : bt_indx,
859 bt_indx,
860 sparx5,
861 PORT_CONF_DEV25G_MODES);
862 }
863}
864
865/* Configure speed/duplex dependent registers */
866static int sparx5_port_config_low_set(struct sparx5 *sparx5,
867 struct sparx5_port *port,
868 struct sparx5_port_config *conf)
869{
870 u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
871 bool fdx = conf->duplex == DUPLEX_FULL;
872 int spd = conf->speed;
873
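	/* Derive the MAC clock speed selection, gigabit mode and the TX/RX
	 * inter-frame gaps from the configured speed and duplex.
	 */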
874 clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
875 gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
876 tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
877 hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
878 hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
879
880 /* GIG/FDX mode */
881 spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
882 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
883 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
884 DEV2G5_MAC_MODE_CFG_FDX_ENA,
885 sparx5,
886 DEV2G5_MAC_MODE_CFG(port->portno));
887
888 /* Set MAC IFG Gaps */
889 spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
890 DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
891 DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
892 sparx5,
893 DEV2G5_MAC_IFG_CFG(port->portno));
894
895 /* Disable frame aging when in HDX (due to HDX issue) */
896 spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
897 HSCH_PORT_MODE_AGE_DIS,
898 sparx5,
899 HSCH_PORT_MODE(port->portno));
900
901 /* Enable MAC module */
902 spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
903 DEV2G5_MAC_ENA_CFG_TX_ENA,
904 sparx5,
905 DEV2G5_MAC_ENA_CFG(port->portno));
906
907 /* Select speed and take MAC out of reset */
908 spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
909 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
910 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
911 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
912 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
913 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
914 sparx5,
915 DEV2G5_DEV_RST_CTRL(port->portno));
916
917 return 0;
918}
919
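/* Select and configure the PCS for the port: switch between the low speed
 * and high speed device if needed, disable the device before reconfiguring,
 * set up the chosen PCS and, for in-band modes, enable or disable the 1G
 * counters in ASM and DSM.
 */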
920int sparx5_port_pcs_set(struct sparx5 *sparx5,
921 struct sparx5_port *port,
922 struct sparx5_port_config *conf)
923
924{
925 bool high_speed_dev = sparx5_is_baser(conf->portmode);
926 int err;
927
928 if (sparx5_dev_change(sparx5, port, conf)) {
929 /* switch device */
930 sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
931
932 /* Disable the device that is not in use */
933 err = sparx5_port_disable(sparx5, port, !high_speed_dev);
934 if (err)
935 return err;
936 }
937 /* Disable the port before re-configuring */
938 err = sparx5_port_disable(sparx5, port, high_speed_dev);
939 if (err)
940 return -EINVAL;
941
942 if (high_speed_dev)
943 err = sparx5_port_pcs_high_set(sparx5, port, conf);
944 else
945 err = sparx5_port_pcs_low_set(sparx5, port, conf);
946
947 if (err)
948 return -EINVAL;
949
950 if (port->conf.inband) {
951 /* Enable/disable 1G counters in ASM */
952 spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
953 ASM_PORT_CFG_CSC_STAT_DIS,
954 sparx5,
955 ASM_PORT_CFG(port->portno));
956
957 /* Enable/disable 1G counters in DSM */
958 spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
959 DSM_BUF_CFG_CSC_STAT_DIS,
960 sparx5,
961 DSM_BUF_CFG(port->portno));
962 }
963
964 port->conf = *conf;
965
966 return 0;
967}
968
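/* Apply a port configuration: verify the requested speed, program the speed
 * and duplex dependent MAC settings (low speed devices only), set up flow
 * control and the DSM TX stop watermark, and enable the port in the queue
 * system with the computed forwarding urgency.
 */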
969int sparx5_port_config(struct sparx5 *sparx5,
970 struct sparx5_port *port,
971 struct sparx5_port_config *conf)
972{
973 bool high_speed_dev = sparx5_is_baser(conf->portmode);
974 int err, urgency, stop_wm;
975
976 err = sparx5_port_verify_speed(sparx5, port, conf);
977 if (err)
978 return err;
979
980 /* High speed device is already configured */
981 if (!high_speed_dev)
982 sparx5_port_config_low_set(sparx5, port, conf);
983
984 /* Configure flow control */
985 err = sparx5_port_fc_setup(sparx5, port, conf);
986 if (err)
987 return err;
988
989 /* Set the DSM stop watermark */
990 stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
991 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
992 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
993 sparx5,
994 DSM_DEV_TX_STOP_WM_CFG(port->portno));
995
996 /* Enable port in queue system */
997 urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
998 spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
999 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
1000 QFWD_SWITCH_PORT_MODE_PORT_ENA |
1001 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
1002 sparx5,
1003 QFWD_SWITCH_PORT_MODE(port->portno));
1004
1005 /* Save the new values */
1006 port->conf = *conf;
1007
1008 return 0;
1009}
1010
1011/* Initialize port config to default */
1012int sparx5_port_init(struct sparx5 *sparx5,
1013 struct sparx5_port *port,
1014 struct sparx5_port_config *conf)
1015{
1016 u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1017 u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1018 u32 devhigh = sparx5_to_high_dev(port->portno);
1019 u32 pix = sparx5_port_dev_index(port->portno);
1020 u32 pcs = sparx5_to_pcs_dev(port->portno);
1021 bool sd_pol = port->signd_active_high;
1022 bool sd_sel = !port->signd_internal;
1023 bool sd_ena = port->signd_enable;
1024 u32 pause_stop = 0xFFF - 1; /* FC generation disabled */
1025 void __iomem *devinst;
1026 void __iomem *pcsinst;
1027 int err;
1028
1029 devinst = spx5_inst_get(sparx5, devhigh, pix);
1030 pcsinst = spx5_inst_get(sparx5, pcs, pix);
1031
1032 /* Set the mux port mode */
1033 err = sparx5_port_mux_set(sparx5, port, conf);
1034 if (err)
1035 return err;
1036
1037 /* Configure MAC VLAN awareness */
1038 err = sparx5_port_max_tags_set(sparx5, port);
1039 if (err)
1040 return err;
1041
1042 /* Set Max Length */
1043 spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1044 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
1045 sparx5,
1046 DEV2G5_MAC_MAXLEN_CFG(port->portno));
1047
1048 /* 1G/2G5: Signal Detect configuration */
1049 spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
1050 DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
1051 DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
1052 sparx5,
1053 DEV2G5_PCS1G_SD_CFG(port->portno));
1054
1055 /* Set Pause WM hysteresis */
1056 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
1057 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
1058 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
1059 QSYS_PAUSE_CFG_PAUSE_START |
1060 QSYS_PAUSE_CFG_PAUSE_STOP |
1061 QSYS_PAUSE_CFG_PAUSE_ENA,
1062 sparx5,
1063 QSYS_PAUSE_CFG(port->portno));
1064
1065 /* Port ATOP. Frames are tail dropped when this WM is hit */
1066 spx5_wr(QSYS_ATOP_ATOP_SET(atop),
1067 sparx5,
1068 QSYS_ATOP(port->portno));
1069
1070 /* Discard pause frame 01-80-C2-00-00-01 */
1071 spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
1072
1073 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
1074 conf->portmode == PHY_INTERFACE_MODE_SGMII) {
1075 err = sparx5_serdes_set(sparx5, port, conf);
1076 if (err)
1077 return err;
1078
1079 if (!sparx5_port_is_2g5(port->portno))
1080 /* Enable shadow device */
1081 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
1082 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
1083 sparx5,
1084 DSM_DEV_TX_STOP_WM_CFG(port->portno));
1085
1086 sparx5_dev_switch(sparx5, port->portno, false);
1087 }
1088 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
1089 /* All ports must be PCS enabled in QSGMII mode */
1090 spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
1091 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
1092 sparx5,
1093 DEV2G5_DEV_RST_CTRL(port->portno));
1094 }
1095 /* Default IFGs for 1G */
1096 spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
1097 DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
1098 DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
1099 sparx5,
1100 DEV2G5_MAC_IFG_CFG(port->portno));
1101
1102 if (sparx5_port_is_2g5(port->portno))
1103 return 0; /* Low speed device only - return */
1104
1105 /* Now set up the high speed device */
1106 if (conf->portmode == PHY_INTERFACE_MODE_NA)
1107 conf->portmode = PHY_INTERFACE_MODE_10GBASER;
1108
1109 if (sparx5_is_baser(conf->portmode))
1110 sparx5_dev_switch(sparx5, port->portno, true);
1111
1112 /* Set Max Length */
1113 spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1114 DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
1115 devinst,
1116 DEV10G_MAC_MAXLEN_CFG(0));
1117
1118 /* Handle Signal Detect in 10G PCS */
1119 spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
1120 PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
1121 PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
1122 pcsinst,
1123 PCS10G_BR_PCS_SD_CFG(0));
1124
1125 if (sparx5_port_is_25g(port->portno)) {
1126 /* Handle Signal Detect in 25G PCS */
1127 spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
1128 DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
1129 DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
1130 sparx5,
1131 DEV25G_PCS25G_SD_CFG(pix));
1132 }
1133
1134 return 0;
1135}
1136
1137void sparx5_port_enable(struct sparx5_port *port, bool enable)
1138{
1139 struct sparx5 *sparx5 = port->sparx5;
1140
1141 /* Enable or disable the port for frame transfer */
1142 spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
1143 QFWD_SWITCH_PORT_MODE_PORT_ENA,
1144 sparx5,
1145 QFWD_SWITCH_PORT_MODE(port->portno));
1146}