/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __DSA_TAG_H
#define __DSA_TAG_H

#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/types.h>
#include <net/dsa.h>

#include "port.h"
#include "user.h"

struct dsa_tag_driver {
	const struct dsa_device_ops *ops;
	struct list_head list;
	struct module *owner;
};

extern struct packet_type dsa_pack_type;

const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol);
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}

static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
						       int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->user;

	return NULL;
}
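
/* Example (illustrative sketch only, not a real tagger): a tagging protocol's
 * rcv() hook typically decodes a source switch index and port number from the
 * hardware header, then uses dsa_conduit_find_user() to steer the skb to the
 * corresponding user port:
 *
 *	skb->dev = dsa_conduit_find_user(dev, source_device, source_port);
 *	if (!skb->dev)
 *		return NULL;
 *
 * where source_device and source_port are hypothetical values parsed from the
 * tag, and returning NULL from rcv() drops the packet.
 */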

/**
 * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
 * @skb: Pointer to received socket buffer (packet)
 * @br: Pointer to bridge upper interface of ingress port
 * @vid: Parsed VID from packet
 *
 * The bridge can process tagged packets. Software like STP/PTP may not. The
 * bridge can also process untagged packets, to the same effect as if they were
 * tagged with the PVID of the ingress port. So packets tagged with the PVID of
 * the bridge port must be software-untagged, to support both use cases.
 */
static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
							struct net_device *br,
							u16 vid)
{
	u16 pvid, proto;
	int err;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return;

	err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
	if (err)
		return;

	if (vid == pvid && skb->vlan_proto == htons(proto))
		__vlan_hwaccel_clear_tag(skb);
}

/**
 * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
 * @skb: Pointer to received socket buffer (packet)
 * @br: Pointer to bridge upper interface of ingress port
 * @vid: Parsed VID from packet
 *
 * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
 * on the plain port, or on a VLAN upper interface). Packets may reach software
 * tagged with a driver-defined VID which is NOT equal to the PVID of the
 * bridge port (since the bridge is VLAN-unaware, its configuration should NOT
 * be committed to hardware). DSA needs the driver to communicate this private
 * VID to it, so that packets tagged with it can be software-untagged. Note:
 * the private VID may differ per bridge, to support the FDB isolation use
 * case.
 *
 * FIXME: this is currently implemented based on the broken assumption that
 * the "private VID" used by the driver in VLAN-unaware mode is equal to the
 * bridge PVID. It should not be, except for a coincidence; the bridge PVID is
 * irrelevant to the data path in the VLAN-unaware mode. Thus, the VID that
 * this function removes is wrong.
 *
 * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
 * to make the two independent. Only then, if there still remains a need to
 * strip the private VID from packets, should a new ds->ops->get_private_vid()
 * API be introduced to communicate to DSA what this VID is, which needs
 * to be stripped here.
 */
static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
							  struct net_device *br,
							  u16 vid)
{
	struct net_device *upper_dev;
	u16 pvid, proto;
	int err;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return;

	err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
	if (err)
		return;

	if (vid != pvid || skb->vlan_proto != htons(proto))
		return;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (!upper_dev)
		__vlan_hwaccel_clear_tag(skb);
}

/**
 * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
 * @skb: Pointer to socket buffer (packet)
 *
 * Receive path method for switches which send some packets as VLAN-tagged
 * towards the CPU port (generally from VLAN-aware bridge ports) even when the
 * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
 * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
 *
 * As a side effect of this method, any VLAN tag from the skb head is moved
 * to hwaccel.
 */
static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_user_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 vid, proto;
	int err;

	/* software untagging for standalone ports not yet necessary */
	if (!br)
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	if (br_vlan_enabled(br)) {
		if (dp->ds->untag_vlan_aware_bridge_pvid)
			dsa_software_untag_vlan_aware_bridge(skb, br, vid);
	} else {
		if (dp->ds->untag_bridge_pvid)
			dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
	}

	return skb;
}
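
/* Example (illustrative sketch): a tagger whose switch driver sets
 * ds->untag_bridge_pvid or ds->untag_vlan_aware_bridge_pvid would call this
 * towards the end of its rcv() hook, after the DSA header has been stripped:
 *
 *	skb = dsa_software_vlan_untag(skb);
 *	if (!skb)
 *		return NULL;
 *
 *	return skb;
 */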

/* Helper for switches without hardware support for DSA tagging, so that they
 * can still support termination of traffic through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *user;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		if (!dp->bridge)
			continue;

		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		user = dp->user;

		err = br_vlan_get_info_rcu(user, vid, &vinfo);
		if (err)
			continue;

		return user;
	}

	return NULL;
}
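
/* Example (hypothetical, for illustration only): a tagger which cannot recover
 * the source port from the hardware header could, on RX, elect a designated
 * bridge port based on the VID parsed from the packet:
 *
 *	skb->dev = dsa_find_designated_bridge_port_by_vid(conduit, vid);
 *	if (!skb->dev)
 *		return NULL;
 *
 * so that the bridge can still terminate the traffic on behalf of the whole
 * bridging domain.
 */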

/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_user_to_port(skb->dev);

	skb->offload_fwd_mark = !!(dp->bridge);
}
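
/* A tagger would typically call this from its rcv() hook, right before
 * handing the skb back, once skb->dev has been set to the user port, e.g.:
 *
 *	dsa_default_offload_fwd_mark(skb);
 *
 *	return skb;
 */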

/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                         skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * |                                               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *         >>>>>>> v
 *         >>>>>>> |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *         >>>>>>> +-----------------------+-----------------------+-------+
 *         >>>>>>> |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                                                                         ^
 *                                                                         |
 *                                                                         skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
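
/* Example (illustrative, with a hypothetical 4-byte header length FOO_HLEN):
 * an EtherType-based tagger would typically do, in its rcv() hook,
 *
 *	if (unlikely(!pskb_may_pull(skb, FOO_HLEN)))
 *		return NULL;
 *
 *	(parse the tag at dsa_etype_header_pos_rx(skb) here)
 *
 *	skb_pull_rcsum(skb, FOO_HLEN);
 *	dsa_strip_etype_header(skb, FOO_HLEN);
 *
 * leaving a regular Ethernet frame behind, with skb->data past the EtherType.
 */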

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *          <<<<<<< |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^        <<<<<<< +-----------------------+-----------------------+-------+
 * |        <<<<<<< |    Destination MAC    |      Source MAC       | EType |
 * |                +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}

/* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA conduit perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}

/* On TX, skb->data points to the MAC header, which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}
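
/* Example (illustrative, same hypothetical FOO_HLEN as above): the
 * mirror-image TX sequence in a tagger's xmit() hook would be
 *
 *	u8 *foo_header;
 *
 *	skb_push(skb, FOO_HLEN);
 *	dsa_alloc_etype_header(skb, FOO_HLEN);
 *	foo_header = dsa_etype_header_pos_tx(skb);
 *
 * after which the FOO_HLEN bytes at foo_header can be filled in with the tag
 * contents (including the proprietary EtherType, if any).
 */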

/* Create 2 modaliases per tagging protocol, one to auto-load the module
 * given the ID reported by get_tag_protocol(), and the other by name.
 */
#define DSA_TAG_DRIVER_ALIAS "dsa_tag:"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto, __name) \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __name); \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS "id-" \
		     __stringify(__proto##_VALUE))
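
/* For example (using a hypothetical protocol FOO, assuming corresponding
 * DSA_TAG_PROTO_FOO and DSA_TAG_PROTO_FOO_VALUE definitions in net/dsa.h),
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO, "foo");
 *
 * expands to MODULE_ALIAS("dsa_tag:foo") plus a second alias of the form
 * "dsa_tag:id-<numeric protocol value>", so the module can be auto-loaded
 * either by name or by protocol ID.
 */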

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count,
			      struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count);

#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
static int __init dsa_tag_driver_module_init(void) \
{ \
	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
				 THIS_MODULE); \
	return 0; \
} \
module_init(dsa_tag_driver_module_init); \
\
static void __exit dsa_tag_driver_module_exit(void) \
{ \
	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
} \
module_exit(dsa_tag_driver_module_exit)

/**
 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
 * drivers
 * @__ops_array: Array of tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_drivers(__ops_array) \
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))

#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure from which we can build a linked list of
 * dsa_tag drivers
 */
#define DSA_TAG_DRIVER(__ops) \
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
	.ops = &__ops, \
}

/**
 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
 * driver
 * @__ops: Single tag driver structure
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_driver(__ops) \
DSA_TAG_DRIVER(__ops); \
\
static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
	&DSA_TAG_DRIVER_NAME(__ops) \
}; \
module_dsa_tag_drivers(dsa_tag_driver_array)
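
/* Sketch of how a hypothetical single-protocol tagger module would use the
 * helpers above (FOO, foo_tag_xmit, foo_tag_rcv and FOO_HLEN are made-up
 * names for illustration):
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name			= "foo",
 *		.proto			= DSA_TAG_PROTO_FOO,
 *		.xmit			= foo_tag_xmit,
 *		.rcv			= foo_tag_rcv,
 *		.needed_headroom	= FOO_HLEN,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO, "foo");
 *	module_dsa_tag_driver(foo_netdev_ops);
 *
 * DSA_TAG_DRIVER() wraps foo_netdev_ops in a struct dsa_tag_driver, and the
 * generated init/exit hooks register and unregister it with the DSA core.
 */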

#endif