Linux Audio

Check our new training course

Loading...
v6.8
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * DPAA2 Ethernet Switch declarations
  4 *
  5 * Copyright 2014-2016 Freescale Semiconductor Inc.
  6 * Copyright 2017-2021 NXP
  7 *
  8 */
  9
 10#ifndef __ETHSW_H
 11#define __ETHSW_H
 12
 13#include <linux/netdevice.h>
 14#include <linux/etherdevice.h>
 15#include <linux/rtnetlink.h>
 16#include <linux/if_vlan.h>
 17#include <uapi/linux/if_bridge.h>
 18#include <net/switchdev.h>
 19#include <linux/if_bridge.h>
 20#include <linux/fsl/mc.h>
 21#include <net/pkt_cls.h>
 22#include <soc/fsl/dpaa2-io.h>
 23
 24#include "dpaa2-mac.h"
 25#include "dpsw.h"
 26
 27/* Number of IRQs supported */
 28#define DPSW_IRQ_NUM	2
 29
 30/* Port is member of VLAN */
 31#define ETHSW_VLAN_MEMBER	1
 32/* VLAN to be treated as untagged on egress */
 33#define ETHSW_VLAN_UNTAGGED	2
 34/* Untagged frames will be assigned to this VLAN */
 35#define ETHSW_VLAN_PVID		4
 36/* VLAN configured on the switch */
 37#define ETHSW_VLAN_GLOBAL	8
 38
 39/* Maximum Frame Length supported by HW (currently 10k) */
 40#define DPAA2_MFL		(10 * 1024)
 41#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
 42#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
 43
 44#define ETHSW_FEATURE_MAC_ADDR	BIT(0)
 45
 46/* Number of receive queues (one RX and one TX_CONF) */
 47#define DPAA2_SWITCH_RX_NUM_FQS	2
 48
 49/* Hardware requires alignment for ingress/egress buffer addresses */
 50#define DPAA2_SWITCH_RX_BUF_RAW_SIZE	PAGE_SIZE
 51#define DPAA2_SWITCH_RX_BUF_TAILROOM \
 52	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 53#define DPAA2_SWITCH_RX_BUF_SIZE \
 54	(DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
 55
 56#define DPAA2_SWITCH_STORE_SIZE 16
 57
 58/* Buffer management */
 59#define BUFS_PER_CMD			7
 60#define DPAA2_ETHSW_NUM_BUFS		(1024 * BUFS_PER_CMD)
 61#define DPAA2_ETHSW_REFILL_THRESH	(DPAA2_ETHSW_NUM_BUFS * 5 / 6)
 62
 63/* Number of times to retry DPIO portal operations while waiting
 64 * for portal to finish executing current command and become
 65 * available. We want to avoid being stuck in a while loop in case
 66 * hardware becomes unresponsive, but not give up too easily if
 67 * the portal really is busy for valid reasons
 68 */
 69#define DPAA2_SWITCH_SWP_BUSY_RETRIES		1000
 70
 71/* Hardware annotation buffer size */
 72#define DPAA2_SWITCH_HWA_SIZE			64
 73/* Software annotation buffer size */
 74#define DPAA2_SWITCH_SWA_SIZE			64
 75
 76#define DPAA2_SWITCH_TX_BUF_ALIGN		64
 77
 78#define DPAA2_SWITCH_TX_DATA_OFFSET \
 79	(DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
 80
 81#define DPAA2_SWITCH_NEEDED_HEADROOM \
 82	(DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
 83
 84#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES	16
 85#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS		1
 86
 87#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE	256
 88
 89extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
 90
 91struct ethsw_core;
 92
/* State of one switch frame queue (Rx or Tx-conf) serviced through DPIO */
struct dpaa2_switch_fq {
	struct ethsw_core *ethsw;		/* owning switch instance */
	enum dpsw_queue_type type;		/* Rx or Tx confirmation queue */
	struct dpaa2_io_store *store;		/* DPIO dequeue result store */
	struct dpaa2_io_notification_ctx nctx;	/* data-availability notification ctx */
	struct napi_struct napi;		/* NAPI instance polling this queue */
	u32 fqid;				/* hardware frame queue id */
};
101
/* One hardware FDB (forwarding database) instance; ports in the same
 * bridge share an FDB so their learning/flooding domains coincide.
 */
struct dpaa2_switch_fdb {
	struct net_device	*bridge_dev;	/* bridge using this FDB */
	u16			fdb_id;		/* hardware FDB id */
	bool			in_use;		/* instance currently allocated */
};
107
/* Software bookkeeping for one offloaded ACL rule */
struct dpaa2_switch_acl_entry {
	struct list_head	list;		/* node in the block's acl_entries list */
	u16			prio;		/* tc filter priority */
	unsigned long		cookie;		/* tc cookie identifying the rule */

	struct dpsw_acl_entry_cfg cfg;		/* hardware entry configuration */
	struct dpsw_acl_key	key;		/* match key programmed into hardware */
};
116
/* Software bookkeeping for one offloaded mirroring rule
 * (installed via the matchall classifier — see dpaa2_switch_cls_matchall_*)
 */
struct dpaa2_switch_mirror_entry {
	struct list_head	list;		/* node in the block's mirror_entries list */
	struct dpsw_reflection_cfg cfg;		/* hardware reflection configuration */
	unsigned long		cookie;		/* tc cookie identifying the rule */
	u16 if_id;				/* switch interface the rule applies to */
};
123
/* State of a tc filter block: the hardware ACL table plus any mirror
 * rules, shareable between switch ports (membership tracked in @ports).
 */
struct dpaa2_switch_filter_block {
	struct ethsw_core	*ethsw;		/* owning switch instance */
	u64			ports;		/* bitmask of ports bound to this block */
	bool			in_use;		/* block currently allocated */

	struct list_head	acl_entries;	/* software list of ACL rules */
	u16			acl_id;		/* hardware ACL table id */
	u8			num_acl_rules;	/* rules currently programmed */

	struct list_head	mirror_entries;	/* software list of mirror rules */
};
135
136static inline bool
137dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
138{
139	if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
140	    DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
141		return true;
142	return false;
143}
144
/* Per port private data */
struct ethsw_port_priv {
	struct net_device	*netdev;	/* this port's net_device */
	u16			idx;		/* port index within the switch */
	struct ethsw_core	*ethsw_data;	/* backpointer to switch instance */
	u8			link_state;	/* last known link state */
	u8			stp_state;	/* spanning tree state */

	u8			vlans[VLAN_VID_MASK + 1];	/* per-VID ETHSW_VLAN_* flags */
	u16			pvid;		/* VLAN assigned to untagged ingress frames */
	u16			tx_qdid;	/* Tx queuing destination id */

	struct dpaa2_switch_fdb	*fdb;		/* FDB this port currently belongs to */
	bool			bcast_flood;	/* flood broadcast frames */
	bool			ucast_flood;	/* flood unknown unicast frames */
	bool			learn_ena;	/* hardware address learning enabled */

	struct dpaa2_switch_filter_block *filter_block;	/* bound tc filter block */
	struct dpaa2_mac	*mac;		/* connected DPMAC, NULL when none */
	/* Protects against changes to port_priv->mac */
	struct mutex		mac_lock;
};
167
/* Switch data */
struct ethsw_core {
	struct device			*dev;		/* underlying fsl-mc device */
	struct fsl_mc_io		*mc_io;		/* MC command I/O portal */
	u16				dpsw_handle;	/* open DPSW object token */
	struct dpsw_attr		sw_attr;	/* cached DPSW attributes */
	u16				major, minor;	/* DPSW API version */
	unsigned long			features;	/* ETHSW_FEATURE_* bitmask */
	int				dev_id;
	struct ethsw_port_priv		**ports;	/* array of port privates */
	struct iommu_domain		*iommu_domain;

	u8				vlans[VLAN_VID_MASK + 1];	/* per-VID ETHSW_VLAN_* flags */

	struct workqueue_struct		*workqueue;

	struct dpaa2_switch_fq		fq[DPAA2_SWITCH_RX_NUM_FQS];	/* Rx + Tx-conf queues */
	struct fsl_mc_device		*dpbp_dev;	/* buffer pool object */
	int				buf_count;	/* buffers currently seeded */
	u16				bpid;		/* buffer pool id */
	int				napi_users;	/* NOTE(review): looks like a NAPI enable refcount — confirm */

	struct dpaa2_switch_fdb		*fdbs;		/* FDB instances */
	struct dpaa2_switch_filter_block *filter_blocks;	/* tc filter blocks */
	u16				mirror_port;	/* port set up as mirror destination */
};
194
195static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
196					 struct net_device *netdev)
197{
198	int i;
199
200	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
201		if (ethsw->ports[i]->netdev == netdev)
202			return ethsw->ports[i]->idx;
203
204	return -EINVAL;
205}
206
/* Sanity-check the DPSW configuration for Linux control traffic: the
 * control interface must be enabled, flooding and broadcast domains must
 * be per-FDB, and there must be at least one FDB per port. Logs the
 * offending condition and returns false when the driver cannot probe.
 */
static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
{
	if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
		dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
		dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
		dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
		return false;
	}

	return true;
}
231
/* True when the port's connected MAC is of the PHY type; thin wrapper
 * over dpaa2_mac_is_type_phy() from dpaa2-mac.h.
 * NOTE(review): NULL-mac handling is delegated to that helper — confirm it
 * tolerates port_priv->mac == NULL.
 */
static inline bool
dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
{
	return dpaa2_mac_is_type_phy(port_priv->mac);
}
237
238static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
239{
240	return port_priv->mac ? true : false;
241}
242
243bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
244
245int dpaa2_switch_port_vlans_add(struct net_device *netdev,
246				const struct switchdev_obj_port_vlan *vlan);
247
248int dpaa2_switch_port_vlans_del(struct net_device *netdev,
249				const struct switchdev_obj_port_vlan *vlan);
250
251typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
252				  struct fdb_dump_entry *fdb_entry,
253				  void *data);
254
255/* TC offload */
256
257int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
258				    struct flow_cls_offload *cls);
259
260int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
261				    struct flow_cls_offload *cls);
262
263int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
264				      struct tc_cls_matchall_offload *cls);
265
266int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
267				      struct tc_cls_matchall_offload *cls);
268
269int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
270			       struct dpaa2_switch_acl_entry *entry);
271
272int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
273				      struct ethsw_port_priv *port_priv);
274
275int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
276					struct ethsw_port_priv *port_priv);
277#endif	/* __ETHSW_H */
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * DPAA2 Ethernet Switch declarations
  4 *
  5 * Copyright 2014-2016 Freescale Semiconductor Inc.
  6 * Copyright 2017-2021 NXP
  7 *
  8 */
  9
 10#ifndef __ETHSW_H
 11#define __ETHSW_H
 12
 13#include <linux/netdevice.h>
 14#include <linux/etherdevice.h>
 15#include <linux/rtnetlink.h>
 16#include <linux/if_vlan.h>
 17#include <uapi/linux/if_bridge.h>
 18#include <net/switchdev.h>
 19#include <linux/if_bridge.h>
 20#include <linux/fsl/mc.h>
 21#include <net/pkt_cls.h>
 22#include <soc/fsl/dpaa2-io.h>
 23
 
 24#include "dpsw.h"
 25
 26/* Number of IRQs supported */
 27#define DPSW_IRQ_NUM	2
 28
 29/* Port is member of VLAN */
 30#define ETHSW_VLAN_MEMBER	1
 31/* VLAN to be treated as untagged on egress */
 32#define ETHSW_VLAN_UNTAGGED	2
 33/* Untagged frames will be assigned to this VLAN */
 34#define ETHSW_VLAN_PVID		4
 35/* VLAN configured on the switch */
 36#define ETHSW_VLAN_GLOBAL	8
 37
 38/* Maximum Frame Length supported by HW (currently 10k) */
 39#define DPAA2_MFL		(10 * 1024)
 40#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
 41#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
 42
 43#define ETHSW_FEATURE_MAC_ADDR	BIT(0)
 44
 45/* Number of receive queues (one RX and one TX_CONF) */
 46#define DPAA2_SWITCH_RX_NUM_FQS	2
 47
 48/* Hardware requires alignment for ingress/egress buffer addresses */
 49#define DPAA2_SWITCH_RX_BUF_RAW_SIZE	PAGE_SIZE
 50#define DPAA2_SWITCH_RX_BUF_TAILROOM \
 51	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 52#define DPAA2_SWITCH_RX_BUF_SIZE \
 53	(DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
 54
 55#define DPAA2_SWITCH_STORE_SIZE 16
 56
 57/* Buffer management */
 58#define BUFS_PER_CMD			7
 59#define DPAA2_ETHSW_NUM_BUFS		(1024 * BUFS_PER_CMD)
 60#define DPAA2_ETHSW_REFILL_THRESH	(DPAA2_ETHSW_NUM_BUFS * 5 / 6)
 61
 62/* Number of times to retry DPIO portal operations while waiting
 63 * for portal to finish executing current command and become
 64 * available. We want to avoid being stuck in a while loop in case
 65 * hardware becomes unresponsive, but not give up too easily if
 66 * the portal really is busy for valid reasons
 67 */
 68#define DPAA2_SWITCH_SWP_BUSY_RETRIES		1000
 69
 70/* Hardware annotation buffer size */
 71#define DPAA2_SWITCH_HWA_SIZE			64
 72/* Software annotation buffer size */
 73#define DPAA2_SWITCH_SWA_SIZE			64
 74
 75#define DPAA2_SWITCH_TX_BUF_ALIGN		64
 76
 77#define DPAA2_SWITCH_TX_DATA_OFFSET \
 78	(DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
 79
 80#define DPAA2_SWITCH_NEEDED_HEADROOM \
 81	(DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
 82
 83#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES	16
 84#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS		1
 85
 86#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE	256
 87
 88extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
 89
 90struct ethsw_core;
 91
/* State of one switch frame queue (Rx or Tx-conf) serviced through DPIO */
struct dpaa2_switch_fq {
	struct ethsw_core *ethsw;		/* owning switch instance */
	enum dpsw_queue_type type;		/* Rx or Tx confirmation queue */
	struct dpaa2_io_store *store;		/* DPIO dequeue result store */
	struct dpaa2_io_notification_ctx nctx;	/* data-availability notification ctx */
	struct napi_struct napi;		/* NAPI instance polling this queue */
	u32 fqid;				/* hardware frame queue id */
};
100
/* One hardware FDB (forwarding database) instance; ports in the same
 * bridge share an FDB so their learning/flooding domains coincide.
 */
struct dpaa2_switch_fdb {
	struct net_device	*bridge_dev;	/* bridge using this FDB */
	u16			fdb_id;		/* hardware FDB id */
	bool			in_use;		/* instance currently allocated */
};
106
/* Software bookkeeping for one offloaded ACL rule */
struct dpaa2_switch_acl_entry {
	struct list_head	list;		/* node in the table's entries list */
	u16			prio;		/* tc filter priority */
	unsigned long		cookie;		/* tc cookie identifying the rule */

	struct dpsw_acl_entry_cfg cfg;		/* hardware entry configuration */
	struct dpsw_acl_key	key;		/* match key programmed into hardware */
};
115
/* State of one hardware ACL table, shareable between switch ports
 * (membership tracked in @ports).
 */
struct dpaa2_switch_acl_tbl {
	struct list_head	entries;	/* software list of ACL rules */
	struct ethsw_core	*ethsw;		/* owning switch instance */
	u64			ports;		/* bitmask of ports bound to this table */

	u16			id;		/* hardware ACL table id */
	u8			num_rules;	/* rules currently programmed */
	bool			in_use;		/* table currently allocated */
};
125
126static inline bool
127dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
128{
129	if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
130	    DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
131		return true;
132	return false;
133}
134
/* Per port private data */
struct ethsw_port_priv {
	struct net_device	*netdev;	/* this port's net_device */
	u16			idx;		/* port index within the switch */
	struct ethsw_core	*ethsw_data;	/* backpointer to switch instance */
	u8			link_state;	/* last known link state */
	u8			stp_state;	/* spanning tree state */

	u8			vlans[VLAN_VID_MASK + 1];	/* per-VID ETHSW_VLAN_* flags */
	u16			pvid;		/* VLAN assigned to untagged ingress frames */
	u16			tx_qdid;	/* Tx queuing destination id */

	struct dpaa2_switch_fdb	*fdb;		/* FDB this port currently belongs to */
	bool			bcast_flood;	/* flood broadcast frames */
	bool			ucast_flood;	/* flood unknown unicast frames */
	bool			learn_ena;	/* hardware address learning enabled */

	struct dpaa2_switch_acl_tbl *acl_tbl;	/* bound ACL table */
};
154
/* Switch data */
struct ethsw_core {
	struct device			*dev;		/* underlying fsl-mc device */
	struct fsl_mc_io		*mc_io;		/* MC command I/O portal */
	u16				dpsw_handle;	/* open DPSW object token */
	struct dpsw_attr		sw_attr;	/* cached DPSW attributes */
	u16				major, minor;	/* DPSW API version */
	unsigned long			features;	/* ETHSW_FEATURE_* bitmask */
	int				dev_id;
	struct ethsw_port_priv		**ports;	/* array of port privates */
	struct iommu_domain		*iommu_domain;

	u8				vlans[VLAN_VID_MASK + 1];	/* per-VID ETHSW_VLAN_* flags */

	struct workqueue_struct		*workqueue;

	struct dpaa2_switch_fq		fq[DPAA2_SWITCH_RX_NUM_FQS];	/* Rx + Tx-conf queues */
	struct fsl_mc_device		*dpbp_dev;	/* buffer pool object */
	int				buf_count;	/* buffers currently seeded */
	u16				bpid;		/* buffer pool id */
	int				napi_users;	/* NOTE(review): looks like a NAPI enable refcount — confirm */

	struct dpaa2_switch_fdb		*fdbs;		/* FDB instances */
	struct dpaa2_switch_acl_tbl	*acls;		/* ACL table instances */
};
180
181static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
182					 struct net_device *netdev)
183{
184	int i;
185
186	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
187		if (ethsw->ports[i]->netdev == netdev)
188			return ethsw->ports[i]->idx;
189
190	return -EINVAL;
191}
192
/* Sanity-check the DPSW configuration for Linux control traffic: the
 * control interface must be enabled, flooding and broadcast domains must
 * be per-FDB, and there must be at least one FDB per port. Logs the
 * offending condition and returns false when the driver cannot probe.
 */
static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
{
	if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
		dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
		dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
		dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
		return false;
	}

	return true;
}
217
 
 
 
 
 
 
 
 
 
 
 
218bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
219
220int dpaa2_switch_port_vlans_add(struct net_device *netdev,
221				const struct switchdev_obj_port_vlan *vlan);
222
223int dpaa2_switch_port_vlans_del(struct net_device *netdev,
224				const struct switchdev_obj_port_vlan *vlan);
225
226typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
227				  struct fdb_dump_entry *fdb_entry,
228				  void *data);
229
230/* TC offload */
231
232int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
233				    struct flow_cls_offload *cls);
234
235int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
236				    struct flow_cls_offload *cls);
237
238int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
239				      struct tc_cls_matchall_offload *cls);
240
241int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
242				      struct tc_cls_matchall_offload *cls);
243
244int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
245			       struct dpaa2_switch_acl_entry *entry);
 
 
 
 
 
 
246#endif	/* __ETHSW_H */