// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

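/* Return the fastest (smallest) non-zero ageing time configured on any port
 * of this switch, using the requested value as the upper bound.
 */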
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						    unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = dsa_to_port(ds, i);

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

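/* Check a requested ageing time against the driver's supported range during
 * the switchdev prepare phase, then program the fastest ageing time in use
 * across the switch during the commit phase.
 */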
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				   struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;
	struct switchdev_trans *trans = info->trans;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
			return -ERANGE;
		if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
			return -ERANGE;
		return 0;
	}

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

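/* An MTU change targets the originating port and the DSA links of its own
 * switch; on other switches of the fabric it only targets DSA and CPU ports,
 * and only when the change must propagate upstream.
 */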
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
				  struct dsa_notifier_mtu_info *info)
{
	if (ds->index == info->sw_index)
		return (port == info->port) || dsa_is_dsa_port(ds, port);

	if (!info->propagate_upstream)
		return false;

	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
		return true;

	return false;
}

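/* Apply the new MTU to every matching port through the driver's
 * port_change_mtu operation.
 */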
static int dsa_switch_mtu(struct dsa_switch *ds,
			   struct dsa_notifier_mtu_info *info)
{
	int port, ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_mtu_match(ds, port, info)) {
			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

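/* A port joined a bridge: notify the driver for the port itself when it
 * belongs to this switch, or through the cross-chip operation when the port
 * lives on another switch or tree of the fabric.
 */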
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_join)
		return ds->ops->port_bridge_join(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join)
		return ds->ops->crosschip_bridge_join(ds, info->tree_index,
						      info->sw_index,
						      info->port, info->br);

	return 0;
}

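/* A port left a bridge: mirror the join path above, and additionally turn
 * VLAN filtering back off when the last port leaves a VLAN-aware bridge on
 * switches where vlan_filtering is a global setting.
 */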
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				    struct dsa_notifier_bridge_info *info)
{
	bool unset_vlan_filtering = br_vlan_enabled(info->br);
	struct dsa_switch_tree *dst = ds->dst;
	int err, i;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing the vlan_filtering setting upon slave ports
	 * leaving it. That is a good thing, because that lets us handle it and
	 * also handle the case where the switch's vlan_filtering setting is
	 * global (not per port). When that happens, the correct moment to
	 * trigger the vlan_filtering callback is only when the last port
	 * leaves this bridge.
	 */
	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
		for (i = 0; i < ds->num_ports; i++) {
			if (i == info->port)
				continue;
			if (dsa_to_port(ds, i)->bridge_dev == info->br) {
				unset_vlan_filtering = false;
				break;
			}
		}
	}
	if (unset_vlan_filtering) {
		struct switchdev_trans trans = {0};

		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      false, &trans);
		if (err && err != -EOPNOTSUPP)
			return err;
	}
	return 0;
}

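/* Install an FDB entry on the local port that leads towards the originating
 * port (the port itself, or the DSA link in its direction).
 */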
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			       struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
}

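/* Remove an FDB entry from the local port that leads towards the originating
 * port.
 */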
static int dsa_switch_fdb_del(struct dsa_switch *ds,
			       struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
}

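/* An MDB entry targets the originating port on its own switch as well as
 * every DSA link of the fabric.
 */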
static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
				  struct dsa_notifier_mdb_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	if (dsa_is_dsa_port(ds, port))
		return true;

	return false;
}

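/* Switchdev prepare phase: ask the driver whether the MDB entry can be
 * offloaded on every matching port before anything is committed.
 */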
static int dsa_switch_mdb_prepare(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int port, err;

	if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_mdb_match(ds, port, info)) {
			err = ds->ops->port_mdb_prepare(ds, port, info->mdb);
			if (err)
				return err;
		}
	}

	return 0;
}

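/* Delegate to the prepare helper above during the switchdev prepare phase,
 * then program the MDB entry on all matching ports on commit.
 */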
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			       struct dsa_notifier_mdb_info *info)
{
	int port;

	if (switchdev_trans_ph_prepare(info->trans))
		return dsa_switch_mdb_prepare(ds, info);

	if (!ds->ops->port_mdb_add)
		return 0;

	for (port = 0; port < ds->num_ports; port++)
		if (dsa_switch_mdb_match(ds, port, info))
			ds->ops->port_mdb_add(ds, port, info->mdb);

	return 0;
}

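/* Remove the MDB entry from the originating port only; DSA links are not
 * deprogrammed here, for the same reason given for VLANs below.
 */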
static int dsa_switch_mdb_del(struct dsa_switch *ds,
			       struct dsa_notifier_mdb_info *info)
{
	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mdb_del(ds, info->port, info->mdb);

	return 0;
}

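/* vlan_for_each() callback: return -EBUSY if the VID of an 8021q upper falls
 * within the range of the bridge VLAN entry being added.
 */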
static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
				      int vlan_dev_vid,
				      void *arg)
{
	struct switchdev_obj_port_vlan *vlan = arg;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vid == vlan_dev_vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	int err = 0;

	/* Device is not bridged, let it proceed with the VLAN device
	 * creation.
	 */
	if (!dp->bridge_dev)
		return err;

	/* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
	 * already checks whether there is an overlapping bridge VLAN entry
	 * with the same VID, so here we only need to check that if we are
	 * adding a bridge VLAN entry there is not an overlapping VLAN device
	 * claiming that VID.
	 */
	return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
			     (void *)vlan);
}

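/* A VLAN entry targets the originating port on its own switch as well as
 * every DSA link of the fabric.
 */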
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
				   struct dsa_notifier_vlan_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	if (dsa_is_dsa_port(ds, port))
		return true;

	return false;
}

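/* Switchdev prepare phase: check for conflicting 8021q uppers, then ask the
 * driver whether the VLAN entry can be offloaded on every matching port.
 */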
static int dsa_switch_vlan_prepare(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	int port, err;

	if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_vlan_match(ds, port, info)) {
			err = dsa_port_vlan_check(ds, port, info->vlan);
			if (err)
				return err;

			err = ds->ops->port_vlan_prepare(ds, port, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

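/* Delegate to the prepare helper above during the switchdev prepare phase,
 * then program the VLAN entry on all matching ports on commit.
 */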
static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	int port;

	if (switchdev_trans_ph_prepare(info->trans))
		return dsa_switch_vlan_prepare(ds, info);

	if (!ds->ops->port_vlan_add)
		return 0;

	for (port = 0; port < ds->num_ports; port++)
		if (dsa_switch_vlan_match(ds, port, info))
			ds->ops->port_vlan_add(ds, port, info->vlan);

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduit
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

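/* Notifier callback: dispatch a cross-chip event from the tree's notifier
 * chain to the matching handler above.
 */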
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Non-switchdev operations cannot be rolled back. If a DSA driver
	 * returns an error during the chained call, switch chips may be in an
	 * inconsistent state.
	 */
	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

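/* Hook the switch into its tree's raw notifier chain so that it receives the
 * cross-chip events dispatched above.
 */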
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}