1// SPDX-License-Identifier: GPL-2.0+
2/* Microchip Sparx5 Switch driver
3 *
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5 */
6
7#include "sparx5_main_regs.h"
8#include "sparx5_main.h"
9
/* Push the software port-membership bitmap for VLAN @vid to hardware.
 *
 * The per-VID bitmap (sparx5->vlan_mask[vid]) is split into 32-bit
 * words and written to the ANA_L3 VLAN mask register(s). The CFG1 and
 * CFG2 words are only written on Sparx5 itself — presumably the other
 * supported targets have at most 32 ports, so those registers do not
 * apply there (confirm against the register layout).
 *
 * Always returns 0; the int return type is kept because callers check
 * the result.
 */
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
	u32 mask[3];

	/* Divide up mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

	/* Output mask to respective registers */
	spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
		spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
	}

	return 0;
}
26
/* One-time VLAN initialization: enable VLAN processing in ANA_L3 and
 * program an identity VID-to-FID mapping for every possible VID
 * (NULL_VID up to VLAN_N_VID - 1).
 */
void sparx5_vlan_init(struct sparx5 *sparx5)
{
	u16 vid;

	/* Globally enable VLAN processing */
	spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
		 ANA_L3_VLAN_CTRL_VLAN_ENA,
		 sparx5,
		 ANA_L3_VLAN_CTRL);

	/* Map VLAN = FID */
	for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
		spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
			 ANA_L3_VLAN_CFG_VLAN_FID,
			 sparx5,
			 ANA_L3_VLAN_CFG(vid));
}
43
/* Per-port VLAN defaults: start out VLAN-unaware, classifying all
 * ingress traffic to the port's current PVID.
 */
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
	struct sparx5_port *port = sparx5->ports[portno];

	/* Configure PVID; VLAN awareness is off until bridging enables it */
	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
		 ANA_CL_VLAN_CTRL_PORT_VID,
		 sparx5,
		 ANA_CL_VLAN_CTRL(port->portno));
}
56
/* Add @port as a member of VLAN @vid.
 *
 * @pvid:     also make @vid the port's default ingress VLAN
 * @untagged: frames on @vid leave the port untagged (native VLAN);
 *            a port can have only one native VLAN, so -EBUSY is
 *            returned if a different one is already configured
 *
 * Updates the software membership bitmap, pushes it to hardware and
 * re-applies the port's full VLAN configuration.
 *
 * Returns 0 on success or a negative error code.
 */
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* Untagged egress vlan classification */
	if (untagged && port->vid != vid) {
		if (port->vid) {
			netdev_err(port->ndev,
				   "Port already has a native VLAN: %d\n",
				   port->vid);
			return -EBUSY;
		}
		port->vid = vid;
	}

	/* Make the port a member of the VLAN */
	set_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Default ingress vlan classification */
	if (pvid)
		port->pvid = vid;

	/* Re-apply the port's whole VLAN setup to hardware */
	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}
88
/* Remove @port from VLAN @vid.
 *
 * Clears the port's membership bit, pushes the new mask to hardware,
 * and resets the port's PVID / native VLAN if they referenced @vid.
 * VID 0 is deliberately never removed (see comment below).
 *
 * Returns 0 on success or a negative error code.
 */
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	/* Stop the port from being a member of the vlan */
	clear_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Ingress */
	if (port->pvid == vid)
		port->pvid = 0;

	/* Egress */
	if (port->vid == vid)
		port->vid = 0;

	/* Re-apply the port's whole VLAN setup to hardware */
	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}
119
120void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
121{
122 struct sparx5 *sparx5 = port->sparx5;
123 u32 val, mask;
124
125 /* mask is spread across 3 registers x 32 bit */
126 if (port->portno < 32) {
127 mask = BIT(port->portno);
128 val = enable ? mask : 0;
129 spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
130 } else if (port->portno < 64) {
131 mask = BIT(port->portno - 32);
132 val = enable ? mask : 0;
133 spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
134 } else if (port->portno < SPX5_PORTS) {
135 mask = BIT(port->portno - 64);
136 val = enable ? mask : 0;
137 spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
138 } else {
139 netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
140 }
141}
142
/* Clear every destination port bit of PGID entry @pgid. The CFG1/CFG2
 * words are only written on Sparx5 itself (see the is_sparx5() guard);
 * presumably the other targets only implement the first word.
 */
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
		spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
	}
}
151
/* Read PGID entry @pgid's port mask into @portmask[0..2].
 *
 * NOTE(review): on non-Sparx5 targets only portmask[0] is written;
 * portmask[1..2] are left untouched — callers presumably initialize
 * the array first (verify at call sites).
 */
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
		portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
	}
}
160
/* Recompute and apply all forwarding state derived from the bridge
 * port sets:
 *  - flood masks (PGID_UC_FLOOD..PGID_BCAST): all bridged ports
 *  - per-port source masks: a bridged port may forward to every other
 *    bridged port but itself; a non-bridged port forwards to none
 *  - automatic learning: enabled only on ports that are both bridged
 *    and in the learning mask
 *
 * On non-Sparx5 targets only the first 32-bit mask word is written
 * for each register group (is_sparx5() guards below).
 */
void sparx5_update_fwd(struct sparx5 *sparx5)
{
	DECLARE_BITMAP(workmask, SPX5_PORTS);
	u32 mask[3];
	int port;

	/* Divide up fwd mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

	/* Update flood masks */
	for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
	     port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
		spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
		if (is_sparx5(sparx5)) {
			spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
			spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
		}
	}

	/* Update SRC masks */
	for (port = 0; port < sparx5->data->consts->n_ports; port++) {
		if (test_bit(port, sparx5->bridge_fwd_mask)) {
			/* Allow to send to all bridged but self */
			bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
			clear_bit(port, workmask);
			bitmap_to_arr32(mask, workmask, SPX5_PORTS);
			spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
			}
		} else {
			/* Not bridged: forward to no one */
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
			}
		}
	}

	/* Learning enabled only for bridged ports */
	bitmap_and(workmask, sparx5->bridge_fwd_mask,
		   sparx5->bridge_lrn_mask, SPX5_PORTS);
	bitmap_to_arr32(mask, workmask, SPX5_PORTS);

	/* Apply learning mask */
	spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
		spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
	}
}
213
/* Apply @port's current VLAN software state (vlan_aware, pvid, vid)
 * to the classifier (ANA_CL) and rewriter (REW) hardware.
 *
 * Called whenever the port's VLAN configuration changes (see
 * sparx5_vlan_vid_add()/sparx5_vlan_vid_del()).
 */
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
			    struct sparx5_port *port)

{
	u32 val;

	/* Configure PVID, vlan aware; pop one tag when VLAN aware */
	val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
	spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

	val = 0;
	if (port->vlan_aware && !port->pvid)
		/* If port is vlan-aware and tagged, drop untagged and
		 * priority tagged frames.
		 */
		val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
	spx5_wr(val, sparx5,
		ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

	/* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
	val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
	if (port->vlan_aware) {
		if (port->vid)
			/* Tag all frames except when VID == DEFAULT_VLAN */
			val |= REW_TAG_CTRL_TAG_CFG_SET(1);
		else
			/* TAG_CFG 3: presumably "tag all frames" — confirm
			 * against the REW_TAG_CTRL register description.
			 */
			val |= REW_TAG_CTRL_TAG_CFG_SET(3);
	}
	spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

	/* Egress VID */
	spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
		 REW_PORT_VLAN_CFG_PORT_VID,
		 sparx5,
		 REW_PORT_VLAN_CFG(port->portno));
}
1// SPDX-License-Identifier: GPL-2.0+
2/* Microchip Sparx5 Switch driver
3 *
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5 */
6
7#include "sparx5_main_regs.h"
8#include "sparx5_main.h"
9
/* NOTE(review): duplicate of sparx5_vlant_set_mask() defined earlier in
 * this file — an older revision that writes CFG1/CFG2 unconditionally
 * (no is_sparx5() guard). The file appears to contain two pasted
 * revisions; this entire second copy should be removed after confirming
 * against upstream sparx5_vlan.c.
 */
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
	u32 mask[3];

	/* Divide up mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

	/* Output mask to respective registers */
	spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
	spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
	spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));

	return 0;
}
24
/* NOTE(review): identical duplicate of sparx5_vlan_init() defined
 * earlier in this file (two pasted revisions) — remove this copy.
 */
void sparx5_vlan_init(struct sparx5 *sparx5)
{
	u16 vid;

	spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
		 ANA_L3_VLAN_CTRL_VLAN_ENA,
		 sparx5,
		 ANA_L3_VLAN_CTRL);

	/* Map VLAN = FID */
	for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
		spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
			 ANA_L3_VLAN_CFG_VLAN_FID,
			 sparx5,
			 ANA_L3_VLAN_CFG(vid));
}
41
/* NOTE(review): identical duplicate of sparx5_vlan_port_setup() defined
 * earlier in this file (two pasted revisions) — remove this copy.
 */
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
	struct sparx5_port *port = sparx5->ports[portno];

	/* Configure PVID */
	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
		 ANA_CL_VLAN_CTRL_PORT_VID,
		 sparx5,
		 ANA_CL_VLAN_CTRL(port->portno));
}
54
/* NOTE(review): identical duplicate of sparx5_vlan_vid_add() defined
 * earlier in this file (two pasted revisions) — remove this copy.
 */
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* Untagged egress vlan classification */
	if (untagged && port->vid != vid) {
		if (port->vid) {
			netdev_err(port->ndev,
				   "Port already has a native VLAN: %d\n",
				   port->vid);
			return -EBUSY;
		}
		port->vid = vid;
	}

	/* Make the port a member of the VLAN */
	set_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Default ingress vlan classification */
	if (pvid)
		port->pvid = vid;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}
86
/* NOTE(review): identical duplicate of sparx5_vlan_vid_del() defined
 * earlier in this file (two pasted revisions) — remove this copy.
 */
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	/* Stop the port from being a member of the vlan */
	clear_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Ingress */
	if (port->pvid == vid)
		port->pvid = 0;

	/* Egress */
	if (port->vid == vid)
		port->vid = 0;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}
117
/* NOTE(review): identical duplicate of sparx5_pgid_update_mask()
 * defined earlier in this file (two pasted revisions) — remove this
 * copy.
 */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	u32 val, mask;

	/* mask is spread across 3 registers x 32 bit */
	if (port->portno < 32) {
		mask = BIT(port->portno);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
	} else if (port->portno < 64) {
		mask = BIT(port->portno - 32);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
	} else if (port->portno < SPX5_PORTS) {
		mask = BIT(port->portno - 64);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
	} else {
		netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
	}
}
140
/* NOTE(review): duplicate of sparx5_pgid_clear() defined earlier in
 * this file — older revision without the is_sparx5() guard (two pasted
 * revisions); remove this copy.
 */
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
	spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
}
147
/* NOTE(review): duplicate of sparx5_pgid_read_mask() defined earlier in
 * this file — older revision without the is_sparx5() guard (two pasted
 * revisions); remove this copy.
 */
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
	portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
}
154
/* NOTE(review): duplicate of sparx5_update_fwd() defined earlier in
 * this file — older revision using the fixed PGID_UC_FLOOD/PGID_BCAST
 * constants and SPX5_PORTS directly, without is_sparx5() guards (two
 * pasted revisions); remove this copy.
 */
void sparx5_update_fwd(struct sparx5 *sparx5)
{
	DECLARE_BITMAP(workmask, SPX5_PORTS);
	u32 mask[3];
	int port;

	/* Divide up fwd mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

	/* Update flood masks */
	for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
		spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
		spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
		spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
	}

	/* Update SRC masks */
	for (port = 0; port < SPX5_PORTS; port++) {
		if (test_bit(port, sparx5->bridge_fwd_mask)) {
			/* Allow to send to all bridged but self */
			bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
			clear_bit(port, workmask);
			bitmap_to_arr32(mask, workmask, SPX5_PORTS);
			spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
			spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
			spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
		} else {
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
		}
	}

	/* Learning enabled only for bridged ports */
	bitmap_and(workmask, sparx5->bridge_fwd_mask,
		   sparx5->bridge_lrn_mask, SPX5_PORTS);
	bitmap_to_arr32(mask, workmask, SPX5_PORTS);

	/* Apply learning mask */
	spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
	spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
	spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
}
198
/* NOTE(review): identical duplicate of sparx5_vlan_port_apply() defined
 * earlier in this file (two pasted revisions) — remove this copy.
 */
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
			    struct sparx5_port *port)

{
	u32 val;

	/* Configure PVID, vlan aware */
	val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
	spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

	val = 0;
	if (port->vlan_aware && !port->pvid)
		/* If port is vlan-aware and tagged, drop untagged and
		 * priority tagged frames.
		 */
		val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
	spx5_wr(val, sparx5,
		ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

	/* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
	val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
	if (port->vlan_aware) {
		if (port->vid)
			/* Tag all frames except when VID == DEFAULT_VLAN */
			val |= REW_TAG_CTRL_TAG_CFG_SET(1);
		else
			val |= REW_TAG_CTRL_TAG_CFG_SET(3);
	}
	spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

	/* Egress VID */
	spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
		 REW_PORT_VLAN_CFG_PORT_VID,
		 sparx5,
		 REW_PORT_VLAN_CFG(port->portno));
}