v6.13.7
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Regular and Ethertype DSA tagging
  4 * Copyright (c) 2008-2009 Marvell Semiconductor
  5 *
  6 * Regular DSA
  7 * -----------
  8 *
  9 * For untagged (in 802.1Q terms) packets, the switch will splice in
 10 * the tag between the SA and the ethertype of the original
 11 * packet. Tagged frames will instead have their outermost .1Q tag
 12 * converted to a DSA tag. It expects the same layout when receiving
 13 * packets from the CPU.
 14 *
 15 * Example:
 16 *
 17 *     .----.----.----.---------
 18 * Pu: | DA | SA | ET | Payload ...
 19 *     '----'----'----'---------
 20 *       6    6    2       N
 21 *     .----.----.--------.-----.----.---------
 22 * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
 23 *     '----'----'--------'-----'----'---------
 24 *       6    6       2      2    2       N
 25 *     .----.----.-----.----.---------
 26 * Pd: | DA | SA | DSA | ET | Payload ...
 27 *     '----'----'-----'----'---------
 28 *       6    6     4    2       N
 29 *
 30 * No matter if a packet is received untagged (Pu) or tagged (Pt),
 31 * it will have the same layout (Pd) when it is sent to the
 32 * CPU. This is done by ignoring 802.3, replacing the ethertype field
 33 * with more metadata, among which is a bit to signal if the original
 34 * packet was tagged or not.
 35 *
 36 * Ethertype DSA
 37 * -------------
 38 * Uses the exact same tag format as regular DSA, but also includes a
 39 * proper ethertype field (which the mv88e6xxx driver sets to
 40 * ETH_P_EDSA/0xdada) followed by two zero bytes:
 41 *
 42 * .----.----.--------.--------.-----.----.---------
 43 * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
 44 * '----'----'--------'--------'-----'----'---------
 45 *   6    6       2        2      4    2       N
 46 */
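A minimal user-space sketch of how the 4-byte DSA tag in the Pd layout above can be decoded. The bit positions mirror the accesses made by dsa_rcv_ll() further down; the struct, helper name and example bytes are invented purely for illustration and are not part of the file.

#include <stdint.h>
#include <stdio.h>

struct dsa_tag_fields {
	uint8_t cmd;     /* top two bits of byte 0 (enum dsa_cmd) */
	uint8_t tagged;  /* bit 5 of byte 0: original frame carried a .1Q tag */
	uint8_t device;  /* low five bits of byte 0: source/target switch */
	uint8_t port;    /* bits 7:3 of byte 1: source/target port */
	uint16_t vid;    /* low nibble of byte 2 plus byte 3: 12-bit VID */
};

static void dsa_tag_decode(const uint8_t *h, struct dsa_tag_fields *f)
{
	f->cmd    = h[0] >> 6;
	f->tagged = !!(h[0] & 0x20);
	f->device = h[0] & 0x1f;
	f->port   = (h[1] >> 3) & 0x1f;
	f->vid    = ((h[2] & 0x0f) << 8) | h[3];
}

int main(void)
{
	/* Tagged FORWARD tag: device 1, port 2, VID 100. */
	const uint8_t tag[4] = { 0xe1, 0x10, 0x00, 0x64 };
	struct dsa_tag_fields f;

	dsa_tag_decode(tag, &f);
	printf("cmd %u tagged %u dev %u port %u vid %u\n",
	       f.cmd, f.tagged, f.device, f.port, f.vid);
	return 0;
}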
 47
 48#include <linux/dsa/mv88e6xxx.h>
 49#include <linux/etherdevice.h>
 50#include <linux/list.h>
 51#include <linux/slab.h>
 52
 53#include "tag.h"
 54
 55#define DSA_NAME	"dsa"
 56#define EDSA_NAME	"edsa"
 57
 58#define DSA_HLEN	4
 59
 60/**
 61 * enum dsa_cmd - DSA Command
 62 * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
 63 *     the CPU port. This is needed to implement control protocols,
 64 *     e.g. STP and LLDP, that must not allow those control packets to
 65 *     be switched according to the normal rules.
 66 * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
 67 *     port, ignoring all the barriers that the switch normally
 68 *     enforces (VLANs, STP port states etc.). No source address
 69 *     learning takes place. "sudo send packet"
 70 * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
 71 *     user configured ingress or egress monitor criteria. These are
 72 *     forwarded by the switch tree to the user configured ingress or
 73 *     egress monitor port, which can be set to the CPU port or a
 74 *     regular port. If the destination is a regular port, the tag
 75 *     will be removed before egressing the port. If the destination
 76 *     is the CPU port, the tag will not be removed.
 77 * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
 78 *     through the switch tree, including the flows that are directed
 79 *     towards the CPU. Its device/port tuple encodes the original
 80 *     source port on which the packet ingressed. It can also be used
 81 *     on transmit by the CPU to defer the forwarding decision to the
 82 *     hardware, based on the current config of PVT/VTU/ATU
 83 * etc. Source address learning takes place if enabled on the
 84 *     receiving DSA/CPU port.
 85 */
 86enum dsa_cmd {
 87	DSA_CMD_TO_CPU     = 0,
 88	DSA_CMD_FROM_CPU   = 1,
 89	DSA_CMD_TO_SNIFFER = 2,
 90	DSA_CMD_FORWARD    = 3
 91};
 92
 93/**
 94 * enum dsa_code - TO_CPU Code
 95 *
 96 * @DSA_CODE_MGMT_TRAP: DA was classified as a management
 97 *     address. Typical examples include STP BPDUs and LLDP.
 98 * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
 99 * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
100 * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
101 *     the device. Typical examples are matching on DA/SA/VID and DHCP
102 *     snooping.
103 * @DSA_CODE_ARP_MIRROR: The name says it all really.
104 * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
105 *     particular policy was set to trigger a mirror instead of a
106 *     trap.
107 * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
108 * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
109 *
110 * A 3-bit code is used to relay why a particular frame was sent to
111 * the CPU. We only use this to determine if the packet was mirrored
112 * or trapped, i.e. whether the packet has been forwarded by hardware
113 * or not.
114 *
115 * This is the superset of all possible codes. Any particular device
116 * may only implement a subset.
117 */
118enum dsa_code {
119	DSA_CODE_MGMT_TRAP     = 0,
120	DSA_CODE_FRAME2REG     = 1,
121	DSA_CODE_IGMP_MLD_TRAP = 2,
122	DSA_CODE_POLICY_TRAP   = 3,
123	DSA_CODE_ARP_MIRROR    = 4,
124	DSA_CODE_POLICY_MIRROR = 5,
125	DSA_CODE_RESERVED_6    = 6,
126	DSA_CODE_RESERVED_7    = 7
127};
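A small sketch, not part of the original file, of how the 3-bit TO_CPU code is reassembled from the tag: bits 2:1 sit in byte 1 and bit 0 is carried in bit 4 of byte 2, exactly as dsa_rcv_ll() extracts it further down. The helper name is hypothetical.

static inline enum dsa_code dsa_tag_to_cpu_code(const u8 *dsa_header)
{
	/* Bits 2:1 of the code live in byte 1, bit 0 in bit 4 of byte 2. */
	return (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
}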
128
129static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
130				   u8 extra)
131{
132	struct dsa_port *dp = dsa_user_to_port(dev);
133	struct net_device *br_dev;
134	u8 tag_dev, tag_port;
135	enum dsa_cmd cmd;
136	u8 *dsa_header;
137
138	if (skb->offload_fwd_mark) {
139		unsigned int bridge_num = dsa_port_bridge_num_get(dp);
140		struct dsa_switch_tree *dst = dp->ds->dst;
141
142		cmd = DSA_CMD_FORWARD;
143
144		/* When offloading forwarding for a bridge, inject FORWARD
145		 * packets on behalf of a virtual switch device with an index
146		 * past the physical switches.
147		 */
148		tag_dev = dst->last_switch + bridge_num;
149		tag_port = 0;
150	} else {
151		cmd = DSA_CMD_FROM_CPU;
152		tag_dev = dp->ds->index;
153		tag_port = dp->index;
154	}
155
156	br_dev = dsa_port_bridge_dev_get(dp);
157
158	/* If frame is already 802.1Q tagged, we can convert it to a DSA
159	 * tag (avoiding a memmove), but only if the port is standalone
160	 * (in which case we always send FROM_CPU) or if the port's
161	 * bridge has VLAN filtering enabled (in which case the CPU port
162	 * will be a member of the VLAN).
163	 */
164	if (skb->protocol == htons(ETH_P_8021Q) &&
165	    (!br_dev || br_vlan_enabled(br_dev))) {
166		if (extra) {
167			skb_push(skb, extra);
168			dsa_alloc_etype_header(skb, extra);
169		}
170
171		/* Construct tagged DSA tag from 802.1Q tag. */
172		dsa_header = dsa_etype_header_pos_tx(skb) + extra;
173		dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
174		dsa_header[1] = tag_port << 3;
175
176		/* Move CFI field from byte 2 to byte 1. */
177		if (dsa_header[2] & 0x10) {
178			dsa_header[1] |= 0x01;
179			dsa_header[2] &= ~0x10;
180		}
181	} else {
182		u16 vid;
183
184		vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
185
186		skb_push(skb, DSA_HLEN + extra);
187		dsa_alloc_etype_header(skb, DSA_HLEN + extra);
188
189		/* Construct DSA header from untagged frame. */
190		dsa_header = dsa_etype_header_pos_tx(skb) + extra;
191
192		dsa_header[0] = (cmd << 6) | tag_dev;
193		dsa_header[1] = tag_port << 3;
194		dsa_header[2] = vid >> 8;
195		dsa_header[3] = vid & 0xff;
196	}
197
198	return skb;
199}
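For reference, a sketch of the byte packing produced by the untagged branch above; the helper and its parameters are hypothetical, but the layout matches dsa_xmit_ll(). A FROM_CPU tag for switch 0, port 3, VID 1, for example, comes out as 0x40 0x18 0x00 0x01.

static void dsa_tag_build_untagged(u8 *dsa_header, enum dsa_cmd cmd,
				   u8 tag_dev, u8 tag_port, u16 vid)
{
	dsa_header[0] = (cmd << 6) | tag_dev;	/* command + switch index */
	dsa_header[1] = tag_port << 3;		/* port in bits 7:3 */
	dsa_header[2] = vid >> 8;		/* VID high nibble */
	dsa_header[3] = vid & 0xff;		/* VID low byte */
}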
200
201static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
202				  u8 extra)
203{
204	bool trap = false, trunk = false;
205	int source_device, source_port;
206	enum dsa_code code;
207	enum dsa_cmd cmd;
208	u8 *dsa_header;
209
210	/* The ethertype field is part of the DSA header. */
211	dsa_header = dsa_etype_header_pos_rx(skb);
212
213	cmd = dsa_header[0] >> 6;
214	switch (cmd) {
215	case DSA_CMD_FORWARD:
216		trunk = !!(dsa_header[1] & 4);
217		break;
218
219	case DSA_CMD_TO_CPU:
220		code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
221
222		switch (code) {
223		case DSA_CODE_FRAME2REG:
224			/* Remote management is not implemented yet,
225			 * drop.
226			 */
227			return NULL;
228		case DSA_CODE_ARP_MIRROR:
229		case DSA_CODE_POLICY_MIRROR:
230			/* Mark mirrored packets to notify any upper
231			 * device (like a bridge) that forwarding has
232			 * already been done by hardware.
233			 */
234			break;
235		case DSA_CODE_MGMT_TRAP:
236		case DSA_CODE_IGMP_MLD_TRAP:
237		case DSA_CODE_POLICY_TRAP:
238			/* Traps have, by definition, not been
239			 * forwarded by hardware, so don't mark them.
240			 */
241			trap = true;
242			break;
243		default:
244			/* Reserved code, this could be anything. Drop
245			 * seems like the safest option.
246			 */
247			return NULL;
248		}
249
250		break;
251
252	default:
253		return NULL;
254	}
255
256	source_device = dsa_header[0] & 0x1f;
257	source_port = (dsa_header[1] >> 3) & 0x1f;
258
259	if (trunk) {
260		struct dsa_port *cpu_dp = dev->dsa_ptr;
261		struct dsa_lag *lag;
262
263		/* The exact source port is not available in the tag,
264		 * so we inject the frame directly on the upper
265		 * team/bond.
266		 */
267		lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
268		skb->dev = lag ? lag->dev : NULL;
269	} else {
270		skb->dev = dsa_conduit_find_user(dev, source_device,
271						 source_port);
272	}
273
274	if (!skb->dev)
275		return NULL;
276
277	/* When using LAG offload, skb->dev is not a DSA user interface,
278	 * so we cannot call dsa_default_offload_fwd_mark and we need to
279	 * special-case it.
280	 */
281	if (trunk)
282		skb->offload_fwd_mark = true;
283	else if (!trap)
284		dsa_default_offload_fwd_mark(skb);
285
286	/* If the 'tagged' bit is set; convert the DSA tag to a 802.1Q
287	 * tag, and delete the ethertype (extra) if applicable. If the
288	 * 'tagged' bit is cleared; delete the DSA tag, and ethertype
289	 * if applicable.
290	 */
291	if (dsa_header[0] & 0x20) {
292		u8 new_header[4];
293
294		/* Insert 802.1Q ethertype and copy the VLAN-related
295		 * fields, but clear the bit that will hold CFI (since
296		 * DSA uses that bit location for another purpose).
297		 */
298		new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
299		new_header[1] = ETH_P_8021Q & 0xff;
300		new_header[2] = dsa_header[2] & ~0x10;
301		new_header[3] = dsa_header[3];
302
303		/* Move CFI bit from its place in the DSA header to
304		 * its 802.1Q-designated place.
305		 */
306		if (dsa_header[1] & 0x01)
307			new_header[2] |= 0x10;
308
309		/* Update packet checksum if skb is CHECKSUM_COMPLETE. */
310		if (skb->ip_summed == CHECKSUM_COMPLETE) {
311			__wsum c = skb->csum;
312			c = csum_add(c, csum_partial(new_header + 2, 2, 0));
313			c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
314			skb->csum = c;
315		}
316
317		memcpy(dsa_header, new_header, DSA_HLEN);
318
319		if (extra)
320			dsa_strip_etype_header(skb, extra);
321	} else {
322		skb_pull_rcsum(skb, DSA_HLEN);
323		dsa_strip_etype_header(skb, DSA_HLEN + extra);
324	}
325
326	return skb;
327}
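A sketch, not part of the original file, of the in-place DSA-to-802.1Q rewrite done by the tagged receive branch above, including moving the DEI/CFI bit back from bit 0 of DSA byte 1 to its 802.1Q position. The helper name is made up; as an example, the tag 0xe1 0x11 0x00 0x64 (tagged FORWARD, device 1, port 2, DEI set, VID 100) becomes the VLAN header 0x81 0x00 0x10 0x64.

static void dsa_tag_to_8021q(const u8 *dsa_header, u8 *vlan_header)
{
	vlan_header[0] = (ETH_P_8021Q >> 8) & 0xff;
	vlan_header[1] = ETH_P_8021Q & 0xff;
	vlan_header[2] = dsa_header[2] & ~0x10;	/* clear the spot used for CFI */
	vlan_header[3] = dsa_header[3];

	/* The DEI/CFI bit travels in bit 0 of DSA byte 1. */
	if (dsa_header[1] & 0x01)
		vlan_header[2] |= 0x10;
}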
328
329#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
330
331static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
332{
333	return dsa_xmit_ll(skb, dev, 0);
334}
335
336static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
337{
338	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
339		return NULL;
340
341	return dsa_rcv_ll(skb, dev, 0);
342}
343
344static const struct dsa_device_ops dsa_netdev_ops = {
345	.name	  = DSA_NAME,
346	.proto	  = DSA_TAG_PROTO_DSA,
347	.xmit	  = dsa_xmit,
348	.rcv	  = dsa_rcv,
349	.needed_headroom = DSA_HLEN,
350};
351
352DSA_TAG_DRIVER(dsa_netdev_ops);
353MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA, DSA_NAME);
354#endif	/* CONFIG_NET_DSA_TAG_DSA */
355
356#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
357
358#define EDSA_HLEN 8
359
360static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
361{
362	u8 *edsa_header;
363
364	skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
365	if (!skb)
366		return NULL;
367
368	edsa_header = dsa_etype_header_pos_tx(skb);
369	edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
370	edsa_header[1] = ETH_P_EDSA & 0xff;
371	edsa_header[2] = 0x00;
372	edsa_header[3] = 0x00;
373	return skb;
374}
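A sketch, not part of the original file, of the complete 8-byte Ethertype DSA header that edsa_xmit() leaves in the frame: ETH_P_EDSA, two zero bytes, then the regular 4-byte DSA tag. The helper name is hypothetical.

static void edsa_header_build(u8 *edsa_header, const u8 *dsa_tag)
{
	edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;	/* 0xda */
	edsa_header[1] = ETH_P_EDSA & 0xff;		/* 0xda */
	edsa_header[2] = 0x00;
	edsa_header[3] = 0x00;
	memcpy(edsa_header + 4, dsa_tag, DSA_HLEN);
}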
375
376static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
377{
378	if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
379		return NULL;
380
381	skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);
382
383	return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
384}
385
386static const struct dsa_device_ops edsa_netdev_ops = {
387	.name	  = EDSA_NAME,
388	.proto	  = DSA_TAG_PROTO_EDSA,
389	.xmit	  = edsa_xmit,
390	.rcv	  = edsa_rcv,
391	.needed_headroom = EDSA_HLEN,
392};
393
394DSA_TAG_DRIVER(edsa_netdev_ops);
395MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA, EDSA_NAME);
396#endif	/* CONFIG_NET_DSA_TAG_EDSA */
397
398static struct dsa_tag_driver *dsa_tag_drivers[] = {
399#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
400	&DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
401#endif
402#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
403	&DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
404#endif
405};
406
407module_dsa_tag_drivers(dsa_tag_drivers);
408
409MODULE_DESCRIPTION("DSA tag driver for Marvell switches using DSA headers");
410MODULE_LICENSE("GPL");
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Regular and Ethertype DSA tagging
  4 * Copyright (c) 2008-2009 Marvell Semiconductor
  5 *
  6 * Regular DSA
  7 * -----------
  8 *
  9 * For untagged (in 802.1Q terms) packets, the switch will splice in
 10 * the tag between the SA and the ethertype of the original
 11 * packet. Tagged frames will instead have their outermost .1Q tag
 12 * converted to a DSA tag. It expects the same layout when receiving
 13 * packets from the CPU.
 14 *
 15 * Example:
 16 *
 17 *     .----.----.----.---------
 18 * Pu: | DA | SA | ET | Payload ...
 19 *     '----'----'----'---------
 20 *       6    6    2       N
 21 *     .----.----.--------.-----.----.---------
 22 * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
 23 *     '----'----'--------'-----'----'---------
 24 *       6    6       2      2    2       N
 25 *     .----.----.-----.----.---------
 26 * Pd: | DA | SA | DSA | ET | Payload ...
 27 *     '----'----'-----'----'---------
 28 *       6    6     4    2       N
 29 *
 30 * No matter if a packet is received untagged (Pu) or tagged (Pt),
 31 * it will have the same layout (Pd) when it is sent to the
 32 * CPU. This is done by ignoring 802.3, replacing the ethertype field
 33 * with more metadata, among which is a bit to signal if the original
 34 * packet was tagged or not.
 35 *
 36 * Ethertype DSA
 37 * -------------
 38 * Uses the exact same tag format as regular DSA, but also includes a
 39 * proper ethertype field (which the mv88e6xxx driver sets to
 40 * ETH_P_EDSA/0xdada) followed by two zero bytes:
 41 *
 42 * .----.----.--------.--------.-----.----.---------
 43 * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
 44 * '----'----'--------'--------'-----'----'---------
 45 *   6    6       2        2      4    2       N
 46 */
 47
 48#include <linux/etherdevice.h>
 49#include <linux/list.h>
 50#include <linux/slab.h>
 51
 52#include "dsa_priv.h"
 53
 54#define DSA_HLEN	4
 55
 56/**
 57 * enum dsa_cmd - DSA Command
 58 * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
 59 *     the CPU port. This is needed to implement control protocols,
 60 *     e.g. STP and LLDP, that must not allow those control packets to
 61 *     be switched according to the normal rules.
 62 * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
 63 *     port, ignoring all the barriers that the switch normally
 64 *     enforces (VLANs, STP port states etc.). No source address
 65 *     learning takes place. "sudo send packet"
 66 * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
 67 *     user configured ingress or egress monitor criteria. These are
 68 *     forwarded by the switch tree to the user configured ingress or
 69 *     egress monitor port, which can be set to the CPU port or a
 70 *     regular port. If the destination is a regular port, the tag
 71 *     will be removed before egressing the port. If the destination
 72 *     is the CPU port, the tag will not be removed.
 73 * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
 74 *     through the switch tree, including the flows that are directed
 75 *     towards the CPU. Its device/port tuple encodes the original
 76 *     source port on which the packet ingressed. It can also be used
 77 *     on transmit by the CPU to defer the forwarding decision to the
 78 *     hardware, based on the current config of PVT/VTU/ATU
 79 * etc. Source address learning takes place if enabled on the
 80 *     receiving DSA/CPU port.
 81 */
 82enum dsa_cmd {
 83	DSA_CMD_TO_CPU     = 0,
 84	DSA_CMD_FROM_CPU   = 1,
 85	DSA_CMD_TO_SNIFFER = 2,
 86	DSA_CMD_FORWARD    = 3
 87};
 88
 89/**
 90 * enum dsa_code - TO_CPU Code
 91 *
 92 * @DSA_CODE_MGMT_TRAP: DA was classified as a management
 93 *     address. Typical examples include STP BPDUs and LLDP.
 94 * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
 95 * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
 96 * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
 97 *     the device. Typical examples are matching on DA/SA/VID and DHCP
 98 *     snooping.
 99 * @DSA_CODE_ARP_MIRROR: The name says it all really.
100 * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
101 *     particular policy was set to trigger a mirror instead of a
102 *     trap.
103 * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
104 * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
105 *
106 * A 3-bit code is used to relay why a particular frame was sent to
107 * the CPU. We only use this to determine if the packet was mirrored
108 * or trapped, i.e. whether the packet has been forwarded by hardware
109 * or not.
110 *
111 * This is the superset of all possible codes. Any particular device
112 * may only implement a subset.
113 */
114enum dsa_code {
115	DSA_CODE_MGMT_TRAP     = 0,
116	DSA_CODE_FRAME2REG     = 1,
117	DSA_CODE_IGMP_MLD_TRAP = 2,
118	DSA_CODE_POLICY_TRAP   = 3,
119	DSA_CODE_ARP_MIRROR    = 4,
120	DSA_CODE_POLICY_MIRROR = 5,
121	DSA_CODE_RESERVED_6    = 6,
122	DSA_CODE_RESERVED_7    = 7
123};
124
125static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
126				   u8 extra)
127{
128	struct dsa_port *dp = dsa_slave_to_port(dev);
129	u8 *dsa_header;
130
131	if (skb->protocol == htons(ETH_P_8021Q)) {
132		if (extra) {
133			skb_push(skb, extra);
134			memmove(skb->data, skb->data + extra, 2 * ETH_ALEN);
135		}
136
137		/* Construct tagged FROM_CPU DSA tag from 802.1Q tag. */
138		dsa_header = skb->data + 2 * ETH_ALEN + extra;
139		dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | 0x20 | dp->ds->index;
140		dsa_header[1] = dp->index << 3;
141
142		/* Move CFI field from byte 2 to byte 1. */
143		if (dsa_header[2] & 0x10) {
144			dsa_header[1] |= 0x01;
145			dsa_header[2] &= ~0x10;
146		}
147	} else {
148		skb_push(skb, DSA_HLEN + extra);
149		memmove(skb->data, skb->data + DSA_HLEN + extra, 2 * ETH_ALEN);
150
151		/* Construct untagged FROM_CPU DSA tag. */
152		dsa_header = skb->data + 2 * ETH_ALEN + extra;
153		dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | dp->ds->index;
154		dsa_header[1] = dp->index << 3;
155		dsa_header[2] = 0x00;
156		dsa_header[3] = 0x00;
157	}
158
159	return skb;
160}
161
162static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
163				  u8 extra)
164{
165	int source_device, source_port;
166	bool trunk = false;
167	enum dsa_code code;
168	enum dsa_cmd cmd;
169	u8 *dsa_header;
170
171	/* The ethertype field is part of the DSA header. */
172	dsa_header = skb->data - 2;
173
174	cmd = dsa_header[0] >> 6;
175	switch (cmd) {
176	case DSA_CMD_FORWARD:
177		skb->offload_fwd_mark = 1;
178
179		trunk = !!(dsa_header[1] & 4);
180		break;
181
182	case DSA_CMD_TO_CPU:
183		code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
184
185		switch (code) {
186		case DSA_CODE_FRAME2REG:
187			/* Remote management is not implemented yet,
188			 * drop.
189			 */
190			return NULL;
191		case DSA_CODE_ARP_MIRROR:
192		case DSA_CODE_POLICY_MIRROR:
193			/* Mark mirrored packets to notify any upper
194			 * device (like a bridge) that forwarding has
195			 * already been done by hardware.
196			 */
197			skb->offload_fwd_mark = 1;
198			break;
199		case DSA_CODE_MGMT_TRAP:
200		case DSA_CODE_IGMP_MLD_TRAP:
201		case DSA_CODE_POLICY_TRAP:
202			/* Traps have, by definition, not been
203			 * forwarded by hardware, so don't mark them.
204			 */
205			break;
206		default:
207			/* Reserved code, this could be anything. Drop
208			 * seems like the safest option.
209			 */
210			return NULL;
211		}
212
213		break;
214
215	default:
216		return NULL;
217	}
218
219	source_device = dsa_header[0] & 0x1f;
220	source_port = (dsa_header[1] >> 3) & 0x1f;
221
222	if (trunk) {
223		struct dsa_port *cpu_dp = dev->dsa_ptr;
224
225		/* The exact source port is not available in the tag,
226		 * so we inject the frame directly on the upper
227		 * team/bond.
228		 */
229		skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
230	} else {
231		skb->dev = dsa_master_find_slave(dev, source_device,
232						 source_port);
233	}
234
235	if (!skb->dev)
236		return NULL;
237
238	/* If the 'tagged' bit is set; convert the DSA tag to a 802.1Q
239	 * tag, and delete the ethertype (extra) if applicable. If the
240	 * 'tagged' bit is cleared; delete the DSA tag, and ethertype
241	 * if applicable.
242	 */
243	if (dsa_header[0] & 0x20) {
244		u8 new_header[4];
245
246		/* Insert 802.1Q ethertype and copy the VLAN-related
247		 * fields, but clear the bit that will hold CFI (since
248		 * DSA uses that bit location for another purpose).
249		 */
250		new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
251		new_header[1] = ETH_P_8021Q & 0xff;
252		new_header[2] = dsa_header[2] & ~0x10;
253		new_header[3] = dsa_header[3];
254
255		/* Move CFI bit from its place in the DSA header to
256		 * its 802.1Q-designated place.
257		 */
258		if (dsa_header[1] & 0x01)
259			new_header[2] |= 0x10;
260
261		/* Update packet checksum if skb is CHECKSUM_COMPLETE. */
262		if (skb->ip_summed == CHECKSUM_COMPLETE) {
263			__wsum c = skb->csum;
264			c = csum_add(c, csum_partial(new_header + 2, 2, 0));
265			c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
266			skb->csum = c;
267		}
268
269		memcpy(dsa_header, new_header, DSA_HLEN);
270
271		if (extra)
272			memmove(skb->data - ETH_HLEN,
273				skb->data - ETH_HLEN - extra,
274				2 * ETH_ALEN);
275	} else {
276		skb_pull_rcsum(skb, DSA_HLEN);
277		memmove(skb->data - ETH_HLEN,
278			skb->data - ETH_HLEN - DSA_HLEN - extra,
279			2 * ETH_ALEN);
280	}
281
282	return skb;
283}
284
285#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
286
287static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
288{
289	return dsa_xmit_ll(skb, dev, 0);
290}
291
292static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
293			       struct packet_type *pt)
294{
295	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
296		return NULL;
297
298	return dsa_rcv_ll(skb, dev, 0);
299}
300
301static const struct dsa_device_ops dsa_netdev_ops = {
302	.name	  = "dsa",
303	.proto	  = DSA_TAG_PROTO_DSA,
304	.xmit	  = dsa_xmit,
305	.rcv	  = dsa_rcv,
306	.needed_headroom = DSA_HLEN,
307};
308
309DSA_TAG_DRIVER(dsa_netdev_ops);
310MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
311#endif	/* CONFIG_NET_DSA_TAG_DSA */
312
313#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
314
315#define EDSA_HLEN 8
316
317static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
318{
319	u8 *edsa_header;
320
321	skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
322	if (!skb)
323		return NULL;
324
325	edsa_header = skb->data + 2 * ETH_ALEN;
326	edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
327	edsa_header[1] = ETH_P_EDSA & 0xff;
328	edsa_header[2] = 0x00;
329	edsa_header[3] = 0x00;
330	return skb;
331}
332
333static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
334				struct packet_type *pt)
335{
336	if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
337		return NULL;
338
339	skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);
340
341	return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
342}
343
344static const struct dsa_device_ops edsa_netdev_ops = {
345	.name	  = "edsa",
346	.proto	  = DSA_TAG_PROTO_EDSA,
347	.xmit	  = edsa_xmit,
348	.rcv	  = edsa_rcv,
349	.needed_headroom = EDSA_HLEN,
350};
351
352DSA_TAG_DRIVER(edsa_netdev_ops);
353MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
354#endif	/* CONFIG_NET_DSA_TAG_EDSA */
355
356static struct dsa_tag_driver *dsa_tag_drivers[] = {
357#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
358	&DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
359#endif
360#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
361	&DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
362#endif
363};
364
365module_dsa_tag_drivers(dsa_tag_drivers);
366
367MODULE_LICENSE("GPL");