/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

enum {
        DSA_NOTIFIER_AGEING_TIME,
        DSA_NOTIFIER_BRIDGE_JOIN,
        DSA_NOTIFIER_BRIDGE_LEAVE,
        DSA_NOTIFIER_FDB_ADD,
        DSA_NOTIFIER_FDB_DEL,
        DSA_NOTIFIER_HOST_FDB_ADD,
        DSA_NOTIFIER_HOST_FDB_DEL,
        DSA_NOTIFIER_HSR_JOIN,
        DSA_NOTIFIER_HSR_LEAVE,
        DSA_NOTIFIER_LAG_CHANGE,
        DSA_NOTIFIER_LAG_JOIN,
        DSA_NOTIFIER_LAG_LEAVE,
        DSA_NOTIFIER_MDB_ADD,
        DSA_NOTIFIER_MDB_DEL,
        DSA_NOTIFIER_HOST_MDB_ADD,
        DSA_NOTIFIER_HOST_MDB_DEL,
        DSA_NOTIFIER_VLAN_ADD,
        DSA_NOTIFIER_VLAN_DEL,
        DSA_NOTIFIER_MTU,
        DSA_NOTIFIER_TAG_PROTO,
        DSA_NOTIFIER_MRP_ADD,
        DSA_NOTIFIER_MRP_DEL,
        DSA_NOTIFIER_MRP_ADD_RING_ROLE,
        DSA_NOTIFIER_MRP_DEL_RING_ROLE,
};
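
/* Each of the events above travels together with one of the *_info structures
 * below, through dsa_tree_notify() or dsa_broadcast() (declared under the
 * dsa2.c section of this header). A minimal sketch of raising such an event,
 * assuming the caller already has a struct dsa_port *dp and an ageing_time
 * value in hand:
 *
 *	struct dsa_notifier_ageing_time_info info = {
 *		.ageing_time = ageing_time,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_AGEING_TIME, &info);
 */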

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
        unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
        struct net_device *br;
        int tree_index;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
        int sw_index;
        int port;
        const unsigned char *addr;
        u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
        const struct switchdev_obj_port_mdb *mdb;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
        struct net_device *lag;
        int sw_index;
        int port;

        struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
        const struct switchdev_obj_port_vlan *vlan;
        int sw_index;
        int port;
        struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
        bool targeted_match;
        int sw_index;
        int port;
        int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
        const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
        const struct switchdev_obj_mrp *mrp;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_ring_role_info {
        const struct switchdev_obj_ring_role_mrp *mrp;
        int sw_index;
        int port;
};

struct dsa_switchdev_event_work {
        struct dsa_switch *ds;
        int port;
        struct net_device *dev;
        struct work_struct work;
        unsigned long event;
        /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
         * SWITCHDEV_FDB_DEL_TO_DEVICE
         */
        unsigned char addr[ETH_ALEN];
        u16 vid;
        bool host_addr;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
        struct net_device *hsr;
        int sw_index;
        int port;
};

struct dsa_slave_priv {
        /* Copy of CPU port xmit for faster access in slave transmit hot path */
        struct sk_buff * (*xmit)(struct sk_buff *skb,
                                 struct net_device *dev);

        struct gro_cells gcells;

        /* DSA port data, such as switch, port index, etc. */
        struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *netpoll;
#endif

        /* TC context */
        struct list_head mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
        return ops->needed_headroom + ops->needed_tailroom;
}
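
/* dsa_tag_protocol_overhead() is the extra room the tagging protocol needs
 * around the payload. The core uses it, among other things, to size the DSA
 * master's MTU so that full-sized tagged frames coming from a slave still
 * fit. A rough sketch of that relationship (the actual call sites live in
 * master.c and slave.c):
 *
 *	master_mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
 *	err = dev_set_mtu(master, master_mtu);
 */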

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
                                                        int device, int port)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch_tree *dst = cpu_dp->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds->index == device && dp->index == port &&
                    dp->type == DSA_PORT_TYPE_USER)
                        return dp->slave;

        return NULL;
}
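
/* Tagging protocol drivers typically call dsa_master_find_slave() from their
 * rcv() hook, once the switch and port indices have been parsed out of the
 * frame's tag, to reassign skb->dev to the right slave interface. A minimal
 * sketch, where source_device and source_port are only illustrative names
 * for whatever the tagger decoded:
 *
 *	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
 *	if (!skb->dev)
 *		return NULL;
 */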

/* port.c */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack);
int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
                              struct netlink_ext_ack *extack);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack);
int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev,
                           struct netlink_ext_ack *extack);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_add(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_del(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(const struct dsa_port *dp,
                          struct switchdev_brport_flags flags,
                          struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;

static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
                                                 struct net_device *dev)
{
        return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
                                            struct net_device *bridge_dev)
{
        /* DSA ports connected to a bridge, and event was emitted
         * for the bridge.
         */
        return dp->bridge_dev == bridge_dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
                                                 struct net_device *dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge_port(dp, dev))
                        return true;

        return false;
}
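
/* The three helpers above are the checks used when handling switchdev and
 * bridge notifications to decide whether the event actually targets a port
 * (or tree) offloaded by this switch. A minimal sketch of such a guard,
 * assuming a handler that receives a switchdev object carrying an orig_dev
 * field:
 *
 *	if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
 *		return -EOPNOTSUPP;
 */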

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);

        return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
        struct dsa_port *dp = dsa_slave_to_port(skb->dev);
        struct net_device *br = dp->bridge_dev;
        struct net_device *dev = skb->dev;
        struct net_device *upper_dev;
        u16 vid, pvid, proto;
        int err;

        if (!br || br_vlan_enabled(br))
                return skb;

        err = br_vlan_get_proto(br, &proto);
        if (err)
                return skb;

        /* Move VLAN tag from data to hwaccel */
        if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
                skb = skb_vlan_untag(skb);
                if (!skb)
                        return NULL;
        }

        if (!skb_vlan_tag_present(skb))
                return skb;

        vid = skb_vlan_tag_get_id(skb);

        /* We already run under an RCU read-side critical section since
         * we are called from netif_receive_skb_list_internal().
         */
        err = br_vlan_get_pvid_rcu(dev, &pvid);
        if (err)
                return skb;

        if (vid != pvid)
                return skb;

        /* The sad part about attempting to untag from DSA is that we
         * don't know, unless we check, if the skb will end up in
         * the bridge's data path - br_allowed_ingress() - or not.
         * For example, there might be an 8021q upper for the
         * default_pvid of the bridge, which will steal VLAN-tagged traffic
         * from the bridge's data path. This is a configuration that DSA
         * supports because vlan_filtering is 0. In that case, we should
         * definitely keep the tag, to make sure it keeps working.
         */
        upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
        if (upper_dev)
                return skb;

        __vlan_hwaccel_clear_tag(skb);

        return skb;
}
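
/* dsa_untag_bridge_pvid() is meant to be the last step of a tagging protocol
 * driver's rcv() path, after skb->dev has been pointed at the slave
 * interface, so that pvid-tagged frames reach a vlan_filtering=0 bridge
 * untagged. A minimal sketch of that final step in a tagger:
 *
 *	return dsa_untag_bridge_pvid(skb);
 */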

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops);

extern struct list_head dsa_tree_list;

#endif