   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Keystone GBE and XGBE subsystem code
   4 *
   5 * Copyright (C) 2014 Texas Instruments Incorporated
   6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   7 *		Sandeep Paulraj <s-paulraj@ti.com>
   8 *		Cyril Chemparathy <cyril@ti.com>
   9 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
  10 *		Wingman Kwok <w-kwok2@ti.com>
  11 */
  12
  13#include <linux/io.h>
  14#include <linux/module.h>
  15#include <linux/of_mdio.h>
  16#include <linux/of_net.h>
  17#include <linux/of_address.h>
  18#include <linux/if_vlan.h>
  19#include <linux/ptp_classify.h>
  20#include <linux/net_tstamp.h>
  21#include <linux/ethtool.h>
  22
  23#include "cpsw.h"
  24#include "cpsw_ale.h"
  25#include "netcp.h"
  26#include "cpts.h"
  27
  28#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
  29#define NETCP_DRIVER_VERSION		"v1.0"
  30
  31#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
  32#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
  33#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
  34#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
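/*
 * Editor's illustrative note (not part of the original source): for a
 * hypothetical raw id_ver value of 0x4ed20102, the macros above decode to
 * GBE_IDENT() == 0x4ed2 (the GBE_SS_VERSION_14 identifier),
 * GBE_RTL_VERSION() == 0, GBE_MAJOR_VERSION() == 1 and
 * GBE_MINOR_VERSION() == 0x02.
 */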
  35
  36/* 1G Ethernet SS defines */
  37#define GBE_MODULE_NAME			"netcp-gbe"
  38#define GBE_SS_VERSION_14		0x4ed2
  39
  40#define GBE_SS_REG_INDEX		0
  41#define GBE_SGMII34_REG_INDEX		1
  42#define GBE_SM_REG_INDEX		2
  43/* offset relative to base of GBE_SS_REG_INDEX */
  44#define GBE13_SGMII_MODULE_OFFSET	0x100
  45/* offset relative to base of GBE_SM_REG_INDEX */
  46#define GBE13_HOST_PORT_OFFSET		0x34
  47#define GBE13_SLAVE_PORT_OFFSET		0x60
  48#define GBE13_EMAC_OFFSET		0x100
  49#define GBE13_SLAVE_PORT2_OFFSET	0x200
  50#define GBE13_HW_STATS_OFFSET		0x300
  51#define GBE13_CPTS_OFFSET		0x500
  52#define GBE13_ALE_OFFSET		0x600
  53#define GBE13_HOST_PORT_NUM		0
  54
  55/* 1G Ethernet NU SS defines */
  56#define GBENU_MODULE_NAME		"netcp-gbenu"
  57#define GBE_SS_ID_NU			0x4ee6
  58#define GBE_SS_ID_2U			0x4ee8
  59
  60#define IS_SS_ID_MU(d) \
  61	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
  62	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
  63
  64#define IS_SS_ID_NU(d) \
  65	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
  66
  67#define IS_SS_ID_VER_14(d) \
  68	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
  69#define IS_SS_ID_2U(d) \
  70	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
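/*
 * Editor's illustrative note: these helpers compare only the IDENT field
 * of ss_version, so for a hypothetical device reporting ss_version
 * 0x4ee60208, IS_SS_ID_NU() and IS_SS_ID_MU() are true while
 * IS_SS_ID_VER_14() and IS_SS_ID_2U() are false; the major/minor/RTL
 * bits do not affect these checks.
 */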
  71
  72#define GBENU_SS_REG_INDEX		0
  73#define GBENU_SM_REG_INDEX		1
  74#define GBENU_SGMII_MODULE_OFFSET	0x100
  75#define GBENU_HOST_PORT_OFFSET		0x1000
  76#define GBENU_SLAVE_PORT_OFFSET		0x2000
  77#define GBENU_EMAC_OFFSET		0x2330
  78#define GBENU_HW_STATS_OFFSET		0x1a000
  79#define GBENU_CPTS_OFFSET		0x1d000
  80#define GBENU_ALE_OFFSET		0x1e000
  81#define GBENU_HOST_PORT_NUM		0
  82#define GBENU_SGMII_MODULE_SIZE		0x100
  83
  84/* 10G Ethernet SS defines */
  85#define XGBE_MODULE_NAME		"netcp-xgbe"
  86#define XGBE_SS_VERSION_10		0x4ee4
  87
  88#define XGBE_SS_REG_INDEX		0
  89#define XGBE_SM_REG_INDEX		1
  90#define XGBE_SERDES_REG_INDEX		2
  91
  92/* offset relative to base of XGBE_SS_REG_INDEX */
  93#define XGBE10_SGMII_MODULE_OFFSET	0x100
  94#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
  95/* offset relative to base of XGBE_SM_REG_INDEX */
  96#define XGBE10_HOST_PORT_OFFSET		0x34
  97#define XGBE10_SLAVE_PORT_OFFSET	0x64
  98#define XGBE10_EMAC_OFFSET		0x400
  99#define XGBE10_CPTS_OFFSET		0x600
 100#define XGBE10_ALE_OFFSET		0x700
 101#define XGBE10_HW_STATS_OFFSET		0x800
 102#define XGBE10_HOST_PORT_NUM		0
 103
 104#define	GBE_TIMER_INTERVAL			(HZ / 2)
 105
 106/* Soft reset register values */
 107#define SOFT_RESET_MASK				BIT(0)
 108#define SOFT_RESET				BIT(0)
 109#define DEVICE_EMACSL_RESET_POLL_COUNT		100
 110#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
 111
 112#define MACSL_RX_ENABLE_CSF			BIT(23)
 113#define MACSL_ENABLE_EXT_CTL			BIT(18)
 114#define MACSL_XGMII_ENABLE			BIT(13)
 115#define MACSL_XGIG_MODE				BIT(8)
 116#define MACSL_GIG_MODE				BIT(7)
 117#define MACSL_GMII_ENABLE			BIT(5)
 118#define MACSL_FULLDUPLEX			BIT(0)
 119
 120#define GBE_CTL_P0_ENABLE			BIT(2)
 121#define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
 122#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
 123#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
 124#define GBE_STATS_CD_SEL			BIT(28)
 125
 126#define GBE_PORT_MASK(x)			(BIT(x) - 1)
 127#define GBE_MASK_NO_PORTS			0
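/*
 * Editor's illustrative note: GBE_PORT_MASK() builds a contiguous port
 * bitmask, e.g. GBE_PORT_MASK(5) == 0x1f, covering host port 0 plus four
 * slave ports; GBE_MASK_NO_PORTS selects no ports at all.
 */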
 128
 129#define GBE_DEF_1G_MAC_CONTROL					\
 130		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
 131		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 132
 133#define GBE_DEF_10G_MAC_CONTROL				\
 134		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
 135		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 136
 137#define GBE_STATSA_MODULE			0
 138#define GBE_STATSB_MODULE			1
 139#define GBE_STATSC_MODULE			2
 140#define GBE_STATSD_MODULE			3
 141
 142#define GBENU_STATS0_MODULE			0
 143#define GBENU_STATS1_MODULE			1
 144#define GBENU_STATS2_MODULE			2
 145#define GBENU_STATS3_MODULE			3
 146#define GBENU_STATS4_MODULE			4
 147#define GBENU_STATS5_MODULE			5
 148#define GBENU_STATS6_MODULE			6
 149#define GBENU_STATS7_MODULE			7
 150#define GBENU_STATS8_MODULE			8
 151
 152#define XGBE_STATS0_MODULE			0
 153#define XGBE_STATS1_MODULE			1
 154#define XGBE_STATS2_MODULE			2
 155
 156/* s: 0-based slave_port */
 157#define SGMII_BASE(d, s) \
 158	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
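/*
 * Editor's illustrative note: with a hypothetical struct gbe_priv *d,
 * SGMII_BASE(d, 0) and SGMII_BASE(d, 1) resolve to d->sgmii_port_regs,
 * while SGMII_BASE(d, 2) and SGMII_BASE(d, 3) resolve to
 * d->sgmii_port34_regs, reflecting the split SGMII register blocks of the
 * GBE13-style subsystem (see GBE_SGMII34_REG_INDEX above).
 */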
 159
 160#define GBE_TX_QUEUE				648
 161#define	GBE_TXHOOK_ORDER			0
 162#define	GBE_RXHOOK_ORDER			0
 163#define GBE_DEFAULT_ALE_AGEOUT			30
 164#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
 165#define SLAVE_LINK_IS_RGMII(s) \
 166	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
 167	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
 168#define SLAVE_LINK_IS_SGMII(s) \
 169	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
 170#define NETCP_LINK_STATE_INVALID		-1
 171
 172#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 173		offsetof(struct gbe##_##rb, rn)
 174#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 175		offsetof(struct gbenu##_##rb, rn)
 176#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 177		offsetof(struct xgbe##_##rb, rn)
 178#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
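/*
 * Editor's illustrative expansion (not in the original): a call such as
 * GBE_SET_REG_OFS(slave, emac_regs, soft_reset) expands to
 *
 *	slave->emac_regs_ofs.soft_reset =
 *		offsetof(struct gbe_emac_regs, soft_reset);
 *
 * and GBE_REG_ADDR(slave, emac_regs, soft_reset) then evaluates to
 * slave->emac_regs + slave->emac_regs_ofs.soft_reset, i.e. the mapped
 * address of that register for whichever register layout (gbe, gbenu or
 * xgbe) was selected at probe time.
 */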
 179
 180#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
 181
 182#if IS_ENABLED(CONFIG_TI_CPTS)
 183/* Px_TS_CTL register fields */
 184#define TS_RX_ANX_F_EN				BIT(0)
 185#define TS_RX_VLAN_LT1_EN			BIT(1)
 186#define TS_RX_VLAN_LT2_EN			BIT(2)
 187#define TS_RX_ANX_D_EN				BIT(3)
 188#define TS_TX_ANX_F_EN				BIT(4)
 189#define TS_TX_VLAN_LT1_EN			BIT(5)
 190#define TS_TX_VLAN_LT2_EN			BIT(6)
 191#define TS_TX_ANX_D_EN				BIT(7)
 192#define TS_LT2_EN				BIT(8)
 193#define TS_RX_ANX_E_EN				BIT(9)
 194#define TS_TX_ANX_E_EN				BIT(10)
 195#define TS_MSG_TYPE_EN_SHIFT			16
 196#define TS_MSG_TYPE_EN_MASK			0xffff
 197
 198/* Px_TS_SEQ_LTYPE register fields */
 199#define TS_SEQ_ID_OFS_SHIFT			16
 200#define TS_SEQ_ID_OFS_MASK			0x3f
 201
 202/* Px_TS_CTL_LTYPE2 register fields */
 203#define TS_107					BIT(16)
 204#define TS_129					BIT(17)
 205#define TS_130					BIT(18)
 206#define TS_131					BIT(19)
 207#define TS_132					BIT(20)
 208#define TS_319					BIT(21)
 209#define TS_320					BIT(22)
 210#define TS_TTL_NONZERO				BIT(23)
 211#define TS_UNI_EN				BIT(24)
 212#define TS_UNI_EN_SHIFT				24
 213
 214#define TS_TX_ANX_ALL_EN	 \
 215	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
 216
 217#define TS_RX_ANX_ALL_EN	 \
 218	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
 219
 220#define TS_CTL_DST_PORT				TS_319
 221#define TS_CTL_DST_PORT_SHIFT			21
 222
 223#define TS_CTL_MADDR_ALL	\
 224	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
 225
 226#define TS_CTL_MADDR_SHIFT			16
 227
 228/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
 229#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
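/*
 * Editor's illustrative note: EVENT_MSG_BITS covers PTP messageType
 * values 0-3 (the event messages listed above); shifted by
 * TS_MSG_TYPE_EN_SHIFT it forms the message-type enable field of a
 * Px_TS_CTL register, so a value such as
 * (EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT) | TS_RX_ANX_ALL_EN is one
 * plausible receive-timestamping configuration.
 */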
 230#endif /* CONFIG_TI_CPTS */
 231
 232struct xgbe_ss_regs {
 233	u32	id_ver;
 234	u32	synce_count;
 235	u32	synce_mux;
 236	u32	control;
 237};
 238
 239struct xgbe_switch_regs {
 240	u32	id_ver;
 241	u32	control;
 242	u32	emcontrol;
 243	u32	stat_port_en;
 244	u32	ptype;
 245	u32	soft_idle;
 246	u32	thru_rate;
 247	u32	gap_thresh;
 248	u32	tx_start_wds;
 249	u32	flow_control;
 250	u32	cppi_thresh;
 251};
 252
 253struct xgbe_port_regs {
 254	u32	blk_cnt;
 255	u32	port_vlan;
 256	u32	tx_pri_map;
 257	u32	sa_lo;
 258	u32	sa_hi;
 259	u32	ts_ctl;
 260	u32	ts_seq_ltype;
 261	u32	ts_vlan;
 262	u32	ts_ctl_ltype2;
 263	u32	ts_ctl2;
 264	u32	control;
 265};
 266
 267struct xgbe_host_port_regs {
 268	u32	blk_cnt;
 269	u32	port_vlan;
 270	u32	tx_pri_map;
 271	u32	src_id;
 272	u32	rx_pri_map;
 273	u32	rx_maxlen;
 274};
 275
 276struct xgbe_emac_regs {
 277	u32	id_ver;
 278	u32	mac_control;
 279	u32	mac_status;
 280	u32	soft_reset;
 281	u32	rx_maxlen;
 282	u32	__reserved_0;
 283	u32	rx_pause;
 284	u32	tx_pause;
 285	u32	em_control;
 286	u32	__reserved_1;
 287	u32	tx_gap;
 288	u32	rsvd[4];
 289};
 290
 291struct xgbe_host_hw_stats {
 292	u32	rx_good_frames;
 293	u32	rx_broadcast_frames;
 294	u32	rx_multicast_frames;
 295	u32	__rsvd_0[3];
 296	u32	rx_oversized_frames;
 297	u32	__rsvd_1;
 298	u32	rx_undersized_frames;
 299	u32	__rsvd_2;
 300	u32	overrun_type4;
 301	u32	overrun_type5;
 302	u32	rx_bytes;
 303	u32	tx_good_frames;
 304	u32	tx_broadcast_frames;
 305	u32	tx_multicast_frames;
 306	u32	__rsvd_3[9];
 307	u32	tx_bytes;
 308	u32	tx_64byte_frames;
 309	u32	tx_65_to_127byte_frames;
 310	u32	tx_128_to_255byte_frames;
 311	u32	tx_256_to_511byte_frames;
 312	u32	tx_512_to_1023byte_frames;
 313	u32	tx_1024byte_frames;
 314	u32	net_bytes;
 315	u32	rx_sof_overruns;
 316	u32	rx_mof_overruns;
 317	u32	rx_dma_overruns;
 318};
 319
 320struct xgbe_hw_stats {
 321	u32	rx_good_frames;
 322	u32	rx_broadcast_frames;
 323	u32	rx_multicast_frames;
 324	u32	rx_pause_frames;
 325	u32	rx_crc_errors;
 326	u32	rx_align_code_errors;
 327	u32	rx_oversized_frames;
 328	u32	rx_jabber_frames;
 329	u32	rx_undersized_frames;
 330	u32	rx_fragments;
 331	u32	overrun_type4;
 332	u32	overrun_type5;
 333	u32	rx_bytes;
 334	u32	tx_good_frames;
 335	u32	tx_broadcast_frames;
 336	u32	tx_multicast_frames;
 337	u32	tx_pause_frames;
 338	u32	tx_deferred_frames;
 339	u32	tx_collision_frames;
 340	u32	tx_single_coll_frames;
 341	u32	tx_mult_coll_frames;
 342	u32	tx_excessive_collisions;
 343	u32	tx_late_collisions;
 344	u32	tx_underrun;
 345	u32	tx_carrier_sense_errors;
 346	u32	tx_bytes;
 347	u32	tx_64byte_frames;
 348	u32	tx_65_to_127byte_frames;
 349	u32	tx_128_to_255byte_frames;
 350	u32	tx_256_to_511byte_frames;
 351	u32	tx_512_to_1023byte_frames;
 352	u32	tx_1024byte_frames;
 353	u32	net_bytes;
 354	u32	rx_sof_overruns;
 355	u32	rx_mof_overruns;
 356	u32	rx_dma_overruns;
 357};
 358
 359struct gbenu_ss_regs {
 360	u32	id_ver;
 361	u32	synce_count;		/* NU */
 362	u32	synce_mux;		/* NU */
 363	u32	control;		/* 2U */
 364	u32	__rsvd_0[2];		/* 2U */
 365	u32	rgmii_status;		/* 2U */
 366	u32	ss_status;		/* 2U */
 367};
 368
 369struct gbenu_switch_regs {
 370	u32	id_ver;
 371	u32	control;
 372	u32	__rsvd_0[2];
 373	u32	emcontrol;
 374	u32	stat_port_en;
 375	u32	ptype;			/* NU */
 376	u32	soft_idle;
 377	u32	thru_rate;		/* NU */
 378	u32	gap_thresh;		/* NU */
 379	u32	tx_start_wds;		/* NU */
 380	u32	eee_prescale;		/* 2U */
 381	u32	tx_g_oflow_thresh_set;	/* NU */
 382	u32	tx_g_oflow_thresh_clr;	/* NU */
 383	u32	tx_g_buf_thresh_set_l;	/* NU */
 384	u32	tx_g_buf_thresh_set_h;	/* NU */
 385	u32	tx_g_buf_thresh_clr_l;	/* NU */
 386	u32	tx_g_buf_thresh_clr_h;	/* NU */
 387};
 388
 389struct gbenu_port_regs {
 390	u32	__rsvd_0;
 391	u32	control;
 392	u32	max_blks;		/* 2U */
 393	u32	mem_align1;
 394	u32	blk_cnt;
 395	u32	port_vlan;
 396	u32	tx_pri_map;		/* NU */
 397	u32	pri_ctl;		/* 2U */
 398	u32	rx_pri_map;
 399	u32	rx_maxlen;
 400	u32	tx_blks_pri;		/* NU */
 401	u32	__rsvd_1;
 402	u32	idle2lpi;		/* 2U */
 403	u32	lpi2idle;		/* 2U */
 404	u32	eee_status;		/* 2U */
 405	u32	__rsvd_2;
 406	u32	__rsvd_3[176];		/* NU: more to add */
 407	u32	__rsvd_4[2];
 408	u32	sa_lo;
 409	u32	sa_hi;
 410	u32	ts_ctl;
 411	u32	ts_seq_ltype;
 412	u32	ts_vlan;
 413	u32	ts_ctl_ltype2;
 414	u32	ts_ctl2;
 415};
 416
 417struct gbenu_host_port_regs {
 418	u32	__rsvd_0;
 419	u32	control;
 420	u32	flow_id_offset;		/* 2U */
 421	u32	__rsvd_1;
 422	u32	blk_cnt;
 423	u32	port_vlan;
 424	u32	tx_pri_map;		/* NU */
 425	u32	pri_ctl;
 426	u32	rx_pri_map;
 427	u32	rx_maxlen;
 428	u32	tx_blks_pri;		/* NU */
 429	u32	__rsvd_2;
 430	u32	idle2lpi;		/* 2U */
 431	u32	lpi2wake;		/* 2U */
 432	u32	eee_status;		/* 2U */
 433	u32	__rsvd_3;
 434	u32	__rsvd_4[184];		/* NU */
 435	u32	host_blks_pri;		/* NU */
 436};
 437
 438struct gbenu_emac_regs {
 439	u32	mac_control;
 440	u32	mac_status;
 441	u32	soft_reset;
 442	u32	boff_test;
 443	u32	rx_pause;
 444	u32	__rsvd_0[11];		/* NU */
 445	u32	tx_pause;
 446	u32	__rsvd_1[11];		/* NU */
 447	u32	em_control;
 448	u32	tx_gap;
 449};
 450
 451/* Some hw stat regs are applicable to slave port only.
 452 * This is handled by gbenu_et_stats struct.  Also some
 453 * are for SS version NU and some are for 2U.
 454 */
 455struct gbenu_hw_stats {
 456	u32	rx_good_frames;
 457	u32	rx_broadcast_frames;
 458	u32	rx_multicast_frames;
 459	u32	rx_pause_frames;		/* slave */
 460	u32	rx_crc_errors;
 461	u32	rx_align_code_errors;		/* slave */
 462	u32	rx_oversized_frames;
 463	u32	rx_jabber_frames;		/* slave */
 464	u32	rx_undersized_frames;
 465	u32	rx_fragments;			/* slave */
 466	u32	ale_drop;
 467	u32	ale_overrun_drop;
 468	u32	rx_bytes;
 469	u32	tx_good_frames;
 470	u32	tx_broadcast_frames;
 471	u32	tx_multicast_frames;
 472	u32	tx_pause_frames;		/* slave */
 473	u32	tx_deferred_frames;		/* slave */
 474	u32	tx_collision_frames;		/* slave */
 475	u32	tx_single_coll_frames;		/* slave */
 476	u32	tx_mult_coll_frames;		/* slave */
 477	u32	tx_excessive_collisions;	/* slave */
 478	u32	tx_late_collisions;		/* slave */
 479	u32	rx_ipg_error;			/* slave 10G only */
 480	u32	tx_carrier_sense_errors;	/* slave */
 481	u32	tx_bytes;
 482	u32	tx_64B_frames;
 483	u32	tx_65_to_127B_frames;
 484	u32	tx_128_to_255B_frames;
 485	u32	tx_256_to_511B_frames;
 486	u32	tx_512_to_1023B_frames;
 487	u32	tx_1024B_frames;
 488	u32	net_bytes;
 489	u32	rx_bottom_fifo_drop;
 490	u32	rx_port_mask_drop;
 491	u32	rx_top_fifo_drop;
 492	u32	ale_rate_limit_drop;
 493	u32	ale_vid_ingress_drop;
 494	u32	ale_da_eq_sa_drop;
 495	u32	__rsvd_0[3];
 496	u32	ale_unknown_ucast;
 497	u32	ale_unknown_ucast_bytes;
 498	u32	ale_unknown_mcast;
 499	u32	ale_unknown_mcast_bytes;
 500	u32	ale_unknown_bcast;
 501	u32	ale_unknown_bcast_bytes;
 502	u32	ale_pol_match;
 503	u32	ale_pol_match_red;		/* NU */
 504	u32	ale_pol_match_yellow;		/* NU */
 505	u32	__rsvd_1[44];
 506	u32	tx_mem_protect_err;
 507	/* following NU only */
 508	u32	tx_pri0;
 509	u32	tx_pri1;
 510	u32	tx_pri2;
 511	u32	tx_pri3;
 512	u32	tx_pri4;
 513	u32	tx_pri5;
 514	u32	tx_pri6;
 515	u32	tx_pri7;
 516	u32	tx_pri0_bcnt;
 517	u32	tx_pri1_bcnt;
 518	u32	tx_pri2_bcnt;
 519	u32	tx_pri3_bcnt;
 520	u32	tx_pri4_bcnt;
 521	u32	tx_pri5_bcnt;
 522	u32	tx_pri6_bcnt;
 523	u32	tx_pri7_bcnt;
 524	u32	tx_pri0_drop;
 525	u32	tx_pri1_drop;
 526	u32	tx_pri2_drop;
 527	u32	tx_pri3_drop;
 528	u32	tx_pri4_drop;
 529	u32	tx_pri5_drop;
 530	u32	tx_pri6_drop;
 531	u32	tx_pri7_drop;
 532	u32	tx_pri0_drop_bcnt;
 533	u32	tx_pri1_drop_bcnt;
 534	u32	tx_pri2_drop_bcnt;
 535	u32	tx_pri3_drop_bcnt;
 536	u32	tx_pri4_drop_bcnt;
 537	u32	tx_pri5_drop_bcnt;
 538	u32	tx_pri6_drop_bcnt;
 539	u32	tx_pri7_drop_bcnt;
 540};
 541
 542#define GBENU_HW_STATS_REG_MAP_SZ	0x200
 543
 544struct gbe_ss_regs {
 545	u32	id_ver;
 546	u32	synce_count;
 547	u32	synce_mux;
 548};
 549
 550struct gbe_ss_regs_ofs {
 551	u16	id_ver;
 552	u16	control;
 553	u16	rgmii_status; /* 2U */
 554};
 555
 556struct gbe_switch_regs {
 557	u32	id_ver;
 558	u32	control;
 559	u32	soft_reset;
 560	u32	stat_port_en;
 561	u32	ptype;
 562	u32	soft_idle;
 563	u32	thru_rate;
 564	u32	gap_thresh;
 565	u32	tx_start_wds;
 566	u32	flow_control;
 567};
 568
 569struct gbe_switch_regs_ofs {
 570	u16	id_ver;
 571	u16	control;
 572	u16	soft_reset;
 573	u16	emcontrol;
 574	u16	stat_port_en;
 575	u16	ptype;
 576	u16	flow_control;
 577};
 578
 579struct gbe_port_regs {
 580	u32	max_blks;
 581	u32	blk_cnt;
 582	u32	port_vlan;
 583	u32	tx_pri_map;
 584	u32	sa_lo;
 585	u32	sa_hi;
 586	u32	ts_ctl;
 587	u32	ts_seq_ltype;
 588	u32	ts_vlan;
 589	u32	ts_ctl_ltype2;
 590	u32	ts_ctl2;
 591};
 592
 593struct gbe_port_regs_ofs {
 594	u16	port_vlan;
 595	u16	tx_pri_map;
 596	u16     rx_pri_map;
 597	u16	sa_lo;
 598	u16	sa_hi;
 599	u16	ts_ctl;
 600	u16	ts_seq_ltype;
 601	u16	ts_vlan;
 602	u16	ts_ctl_ltype2;
 603	u16	ts_ctl2;
 604	u16	rx_maxlen;	/* 2U, NU */
 605};
 606
 607struct gbe_host_port_regs {
 608	u32	src_id;
 609	u32	port_vlan;
 610	u32	rx_pri_map;
 611	u32	rx_maxlen;
 612};
 613
 614struct gbe_host_port_regs_ofs {
 615	u16	port_vlan;
 616	u16	tx_pri_map;
 617	u16	rx_maxlen;
 618};
 619
 620struct gbe_emac_regs {
 621	u32	id_ver;
 622	u32	mac_control;
 623	u32	mac_status;
 624	u32	soft_reset;
 625	u32	rx_maxlen;
 626	u32	__reserved_0;
 627	u32	rx_pause;
 628	u32	tx_pause;
 629	u32	__reserved_1;
 630	u32	rx_pri_map;
 631	u32	rsvd[6];
 632};
 633
 634struct gbe_emac_regs_ofs {
 635	u16	mac_control;
 636	u16	soft_reset;
 637	u16	rx_maxlen;
 638};
 639
 640struct gbe_hw_stats {
 641	u32	rx_good_frames;
 642	u32	rx_broadcast_frames;
 643	u32	rx_multicast_frames;
 644	u32	rx_pause_frames;
 645	u32	rx_crc_errors;
 646	u32	rx_align_code_errors;
 647	u32	rx_oversized_frames;
 648	u32	rx_jabber_frames;
 649	u32	rx_undersized_frames;
 650	u32	rx_fragments;
 651	u32	__pad_0[2];
 652	u32	rx_bytes;
 653	u32	tx_good_frames;
 654	u32	tx_broadcast_frames;
 655	u32	tx_multicast_frames;
 656	u32	tx_pause_frames;
 657	u32	tx_deferred_frames;
 658	u32	tx_collision_frames;
 659	u32	tx_single_coll_frames;
 660	u32	tx_mult_coll_frames;
 661	u32	tx_excessive_collisions;
 662	u32	tx_late_collisions;
 663	u32	tx_underrun;
 664	u32	tx_carrier_sense_errors;
 665	u32	tx_bytes;
 666	u32	tx_64byte_frames;
 667	u32	tx_65_to_127byte_frames;
 668	u32	tx_128_to_255byte_frames;
 669	u32	tx_256_to_511byte_frames;
 670	u32	tx_512_to_1023byte_frames;
 671	u32	tx_1024byte_frames;
 672	u32	net_bytes;
 673	u32	rx_sof_overruns;
 674	u32	rx_mof_overruns;
 675	u32	rx_dma_overruns;
 676};
 677
 678#define GBE_MAX_HW_STAT_MODS			9
 679#define GBE_HW_STATS_REG_MAP_SZ			0x100
 680
 681struct ts_ctl {
 682	int     uni;
 683	u8      dst_port_map;
 684	u8      maddr_map;
 685	u8      ts_mcast_type;
 686};
 687
 688struct gbe_slave {
 689	void __iomem			*port_regs;
 690	void __iomem			*emac_regs;
 691	struct gbe_port_regs_ofs	port_regs_ofs;
 692	struct gbe_emac_regs_ofs	emac_regs_ofs;
 693	int				slave_num; /* 0 based logical number */
 694	int				port_num;  /* actual port number */
 695	atomic_t			link_state;
 696	bool				open;
 697	struct phy_device		*phy;
 698	u32				link_interface;
 699	u32				mac_control;
 700	u8				phy_port_t;
 701	struct device_node		*node;
 702	struct device_node		*phy_node;
 703	struct ts_ctl                   ts_ctl;
 704	struct list_head		slave_list;
 705};
 706
 707struct gbe_priv {
 708	struct device			*dev;
 709	struct netcp_device		*netcp_device;
 710	struct timer_list		timer;
 711	u32				num_slaves;
 712	u32				ale_ports;
 713	bool				enable_ale;
 714	u8				max_num_slaves;
 715	u8				max_num_ports; /* max_num_slaves + 1 */
 716	u8				num_stats_mods;
 717	struct netcp_tx_pipe		tx_pipe;
 718
 719	int				host_port;
 720	u32				rx_packet_max;
 721	u32				ss_version;
 722	u32				stats_en_mask;
 723
 724	void __iomem			*ss_regs;
 725	void __iomem			*switch_regs;
 726	void __iomem			*host_port_regs;
 727	void __iomem			*ale_reg;
 728	void __iomem                    *cpts_reg;
 729	void __iomem			*sgmii_port_regs;
 730	void __iomem			*sgmii_port34_regs;
 731	void __iomem			*xgbe_serdes_regs;
 732	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
 733
 734	struct gbe_ss_regs_ofs		ss_regs_ofs;
 735	struct gbe_switch_regs_ofs	switch_regs_ofs;
 736	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
 737
 738	struct cpsw_ale			*ale;
 739	unsigned int			tx_queue_id;
 740	const char			*dma_chan_name;
 741
 742	struct list_head		gbe_intf_head;
 743	struct list_head		secondary_slaves;
 744	struct net_device		*dummy_ndev;
 745
 746	u64				*hw_stats;
 747	u32				*hw_stats_prev;
 748	const struct netcp_ethtool_stat *et_stats;
 749	int				num_et_stats;
 750	/*  Lock for updating the hwstats */
 751	spinlock_t			hw_stats_lock;
 752
 753	int                             cpts_registered;
 754	struct cpts                     *cpts;
 755	int				rx_ts_enabled;
 756	int				tx_ts_enabled;
 757};
 758
 759struct gbe_intf {
 760	struct net_device	*ndev;
 761	struct device		*dev;
 762	struct gbe_priv		*gbe_dev;
 763	struct netcp_tx_pipe	tx_pipe;
 764	struct gbe_slave	*slave;
 765	struct list_head	gbe_intf_list;
 766	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 767};
 768
 769static struct netcp_module gbe_module;
 770static struct netcp_module xgbe_module;
 771
 772/* Statistic management */
 773struct netcp_ethtool_stat {
 774	char desc[ETH_GSTRING_LEN];
 775	int type;
 776	u32 size;
 777	int offset;
 778};
 779
 780#define GBE_STATSA_INFO(field)						\
 781{									\
 782	"GBE_A:"#field, GBE_STATSA_MODULE,				\
 783	sizeof_field(struct gbe_hw_stats, field),			\
 784	offsetof(struct gbe_hw_stats, field)				\
 785}
 786
 787#define GBE_STATSB_INFO(field)						\
 788{									\
 789	"GBE_B:"#field, GBE_STATSB_MODULE,				\
 790	sizeof_field(struct gbe_hw_stats, field),			\
 791	offsetof(struct gbe_hw_stats, field)				\
 792}
 793
 794#define GBE_STATSC_INFO(field)						\
 795{									\
 796	"GBE_C:"#field, GBE_STATSC_MODULE,				\
 797	sizeof_field(struct gbe_hw_stats, field),			\
 798	offsetof(struct gbe_hw_stats, field)				\
 799}
 800
 801#define GBE_STATSD_INFO(field)						\
 802{									\
 803	"GBE_D:"#field, GBE_STATSD_MODULE,				\
 804	sizeof_field(struct gbe_hw_stats, field),			\
 805	offsetof(struct gbe_hw_stats, field)				\
 806}
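/*
 * Editor's illustrative expansion: an entry such as
 * GBE_STATSA_INFO(rx_good_frames) becomes
 *
 *	{ "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *	  sizeof_field(struct gbe_hw_stats, rx_good_frames),
 *	  offsetof(struct gbe_hw_stats, rx_good_frames) }
 *
 * i.e. an ethtool string plus the statistics module, field size and field
 * offset needed to locate the counter in that module's hardware register
 * block.
 */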
 807
 808static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 809	/* GBE module A */
 810	GBE_STATSA_INFO(rx_good_frames),
 811	GBE_STATSA_INFO(rx_broadcast_frames),
 812	GBE_STATSA_INFO(rx_multicast_frames),
 813	GBE_STATSA_INFO(rx_pause_frames),
 814	GBE_STATSA_INFO(rx_crc_errors),
 815	GBE_STATSA_INFO(rx_align_code_errors),
 816	GBE_STATSA_INFO(rx_oversized_frames),
 817	GBE_STATSA_INFO(rx_jabber_frames),
 818	GBE_STATSA_INFO(rx_undersized_frames),
 819	GBE_STATSA_INFO(rx_fragments),
 820	GBE_STATSA_INFO(rx_bytes),
 821	GBE_STATSA_INFO(tx_good_frames),
 822	GBE_STATSA_INFO(tx_broadcast_frames),
 823	GBE_STATSA_INFO(tx_multicast_frames),
 824	GBE_STATSA_INFO(tx_pause_frames),
 825	GBE_STATSA_INFO(tx_deferred_frames),
 826	GBE_STATSA_INFO(tx_collision_frames),
 827	GBE_STATSA_INFO(tx_single_coll_frames),
 828	GBE_STATSA_INFO(tx_mult_coll_frames),
 829	GBE_STATSA_INFO(tx_excessive_collisions),
 830	GBE_STATSA_INFO(tx_late_collisions),
 831	GBE_STATSA_INFO(tx_underrun),
 832	GBE_STATSA_INFO(tx_carrier_sense_errors),
 833	GBE_STATSA_INFO(tx_bytes),
 834	GBE_STATSA_INFO(tx_64byte_frames),
 835	GBE_STATSA_INFO(tx_65_to_127byte_frames),
 836	GBE_STATSA_INFO(tx_128_to_255byte_frames),
 837	GBE_STATSA_INFO(tx_256_to_511byte_frames),
 838	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
 839	GBE_STATSA_INFO(tx_1024byte_frames),
 840	GBE_STATSA_INFO(net_bytes),
 841	GBE_STATSA_INFO(rx_sof_overruns),
 842	GBE_STATSA_INFO(rx_mof_overruns),
 843	GBE_STATSA_INFO(rx_dma_overruns),
 844	/* GBE module B */
 845	GBE_STATSB_INFO(rx_good_frames),
 846	GBE_STATSB_INFO(rx_broadcast_frames),
 847	GBE_STATSB_INFO(rx_multicast_frames),
 848	GBE_STATSB_INFO(rx_pause_frames),
 849	GBE_STATSB_INFO(rx_crc_errors),
 850	GBE_STATSB_INFO(rx_align_code_errors),
 851	GBE_STATSB_INFO(rx_oversized_frames),
 852	GBE_STATSB_INFO(rx_jabber_frames),
 853	GBE_STATSB_INFO(rx_undersized_frames),
 854	GBE_STATSB_INFO(rx_fragments),
 855	GBE_STATSB_INFO(rx_bytes),
 856	GBE_STATSB_INFO(tx_good_frames),
 857	GBE_STATSB_INFO(tx_broadcast_frames),
 858	GBE_STATSB_INFO(tx_multicast_frames),
 859	GBE_STATSB_INFO(tx_pause_frames),
 860	GBE_STATSB_INFO(tx_deferred_frames),
 861	GBE_STATSB_INFO(tx_collision_frames),
 862	GBE_STATSB_INFO(tx_single_coll_frames),
 863	GBE_STATSB_INFO(tx_mult_coll_frames),
 864	GBE_STATSB_INFO(tx_excessive_collisions),
 865	GBE_STATSB_INFO(tx_late_collisions),
 866	GBE_STATSB_INFO(tx_underrun),
 867	GBE_STATSB_INFO(tx_carrier_sense_errors),
 868	GBE_STATSB_INFO(tx_bytes),
 869	GBE_STATSB_INFO(tx_64byte_frames),
 870	GBE_STATSB_INFO(tx_65_to_127byte_frames),
 871	GBE_STATSB_INFO(tx_128_to_255byte_frames),
 872	GBE_STATSB_INFO(tx_256_to_511byte_frames),
 873	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
 874	GBE_STATSB_INFO(tx_1024byte_frames),
 875	GBE_STATSB_INFO(net_bytes),
 876	GBE_STATSB_INFO(rx_sof_overruns),
 877	GBE_STATSB_INFO(rx_mof_overruns),
 878	GBE_STATSB_INFO(rx_dma_overruns),
 879	/* GBE module C */
 880	GBE_STATSC_INFO(rx_good_frames),
 881	GBE_STATSC_INFO(rx_broadcast_frames),
 882	GBE_STATSC_INFO(rx_multicast_frames),
 883	GBE_STATSC_INFO(rx_pause_frames),
 884	GBE_STATSC_INFO(rx_crc_errors),
 885	GBE_STATSC_INFO(rx_align_code_errors),
 886	GBE_STATSC_INFO(rx_oversized_frames),
 887	GBE_STATSC_INFO(rx_jabber_frames),
 888	GBE_STATSC_INFO(rx_undersized_frames),
 889	GBE_STATSC_INFO(rx_fragments),
 890	GBE_STATSC_INFO(rx_bytes),
 891	GBE_STATSC_INFO(tx_good_frames),
 892	GBE_STATSC_INFO(tx_broadcast_frames),
 893	GBE_STATSC_INFO(tx_multicast_frames),
 894	GBE_STATSC_INFO(tx_pause_frames),
 895	GBE_STATSC_INFO(tx_deferred_frames),
 896	GBE_STATSC_INFO(tx_collision_frames),
 897	GBE_STATSC_INFO(tx_single_coll_frames),
 898	GBE_STATSC_INFO(tx_mult_coll_frames),
 899	GBE_STATSC_INFO(tx_excessive_collisions),
 900	GBE_STATSC_INFO(tx_late_collisions),
 901	GBE_STATSC_INFO(tx_underrun),
 902	GBE_STATSC_INFO(tx_carrier_sense_errors),
 903	GBE_STATSC_INFO(tx_bytes),
 904	GBE_STATSC_INFO(tx_64byte_frames),
 905	GBE_STATSC_INFO(tx_65_to_127byte_frames),
 906	GBE_STATSC_INFO(tx_128_to_255byte_frames),
 907	GBE_STATSC_INFO(tx_256_to_511byte_frames),
 908	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
 909	GBE_STATSC_INFO(tx_1024byte_frames),
 910	GBE_STATSC_INFO(net_bytes),
 911	GBE_STATSC_INFO(rx_sof_overruns),
 912	GBE_STATSC_INFO(rx_mof_overruns),
 913	GBE_STATSC_INFO(rx_dma_overruns),
 914	/* GBE module D */
 915	GBE_STATSD_INFO(rx_good_frames),
 916	GBE_STATSD_INFO(rx_broadcast_frames),
 917	GBE_STATSD_INFO(rx_multicast_frames),
 918	GBE_STATSD_INFO(rx_pause_frames),
 919	GBE_STATSD_INFO(rx_crc_errors),
 920	GBE_STATSD_INFO(rx_align_code_errors),
 921	GBE_STATSD_INFO(rx_oversized_frames),
 922	GBE_STATSD_INFO(rx_jabber_frames),
 923	GBE_STATSD_INFO(rx_undersized_frames),
 924	GBE_STATSD_INFO(rx_fragments),
 925	GBE_STATSD_INFO(rx_bytes),
 926	GBE_STATSD_INFO(tx_good_frames),
 927	GBE_STATSD_INFO(tx_broadcast_frames),
 928	GBE_STATSD_INFO(tx_multicast_frames),
 929	GBE_STATSD_INFO(tx_pause_frames),
 930	GBE_STATSD_INFO(tx_deferred_frames),
 931	GBE_STATSD_INFO(tx_collision_frames),
 932	GBE_STATSD_INFO(tx_single_coll_frames),
 933	GBE_STATSD_INFO(tx_mult_coll_frames),
 934	GBE_STATSD_INFO(tx_excessive_collisions),
 935	GBE_STATSD_INFO(tx_late_collisions),
 936	GBE_STATSD_INFO(tx_underrun),
 937	GBE_STATSD_INFO(tx_carrier_sense_errors),
 938	GBE_STATSD_INFO(tx_bytes),
 939	GBE_STATSD_INFO(tx_64byte_frames),
 940	GBE_STATSD_INFO(tx_65_to_127byte_frames),
 941	GBE_STATSD_INFO(tx_128_to_255byte_frames),
 942	GBE_STATSD_INFO(tx_256_to_511byte_frames),
 943	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
 944	GBE_STATSD_INFO(tx_1024byte_frames),
 945	GBE_STATSD_INFO(net_bytes),
 946	GBE_STATSD_INFO(rx_sof_overruns),
 947	GBE_STATSD_INFO(rx_mof_overruns),
 948	GBE_STATSD_INFO(rx_dma_overruns),
 949};
 950
  951/* This is the number of entries in GBENU_STATS_HOST */
 952#define GBENU_ET_STATS_HOST_SIZE	52
 953
 954#define GBENU_STATS_HOST(field)					\
 955{								\
 956	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
 957	sizeof_field(struct gbenu_hw_stats, field),		\
 958	offsetof(struct gbenu_hw_stats, field)			\
 959}
 960
  961/* This is the number of entries in GBENU_STATS_PORT */
 962#define GBENU_ET_STATS_PORT_SIZE	65
 963
 964#define GBENU_STATS_P1(field)					\
 965{								\
 966	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
 967	sizeof_field(struct gbenu_hw_stats, field),		\
 968	offsetof(struct gbenu_hw_stats, field)			\
 969}
 970
 971#define GBENU_STATS_P2(field)					\
 972{								\
 973	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
 974	sizeof_field(struct gbenu_hw_stats, field),		\
 975	offsetof(struct gbenu_hw_stats, field)			\
 976}
 977
 978#define GBENU_STATS_P3(field)					\
 979{								\
 980	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
 981	sizeof_field(struct gbenu_hw_stats, field),		\
 982	offsetof(struct gbenu_hw_stats, field)			\
 983}
 984
 985#define GBENU_STATS_P4(field)					\
 986{								\
 987	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
 988	sizeof_field(struct gbenu_hw_stats, field),		\
 989	offsetof(struct gbenu_hw_stats, field)			\
 990}
 991
 992#define GBENU_STATS_P5(field)					\
 993{								\
 994	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
 995	sizeof_field(struct gbenu_hw_stats, field),		\
 996	offsetof(struct gbenu_hw_stats, field)			\
 997}
 998
 999#define GBENU_STATS_P6(field)					\
1000{								\
1001	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
1002	sizeof_field(struct gbenu_hw_stats, field),		\
1003	offsetof(struct gbenu_hw_stats, field)			\
1004}
1005
1006#define GBENU_STATS_P7(field)					\
1007{								\
1008	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1009	sizeof_field(struct gbenu_hw_stats, field),		\
1010	offsetof(struct gbenu_hw_stats, field)			\
1011}
1012
1013#define GBENU_STATS_P8(field)					\
1014{								\
1015	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1016	sizeof_field(struct gbenu_hw_stats, field),		\
1017	offsetof(struct gbenu_hw_stats, field)			\
1018}
1019
1020static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1021	/* GBENU Host Module */
1022	GBENU_STATS_HOST(rx_good_frames),
1023	GBENU_STATS_HOST(rx_broadcast_frames),
1024	GBENU_STATS_HOST(rx_multicast_frames),
1025	GBENU_STATS_HOST(rx_crc_errors),
1026	GBENU_STATS_HOST(rx_oversized_frames),
1027	GBENU_STATS_HOST(rx_undersized_frames),
1028	GBENU_STATS_HOST(ale_drop),
1029	GBENU_STATS_HOST(ale_overrun_drop),
1030	GBENU_STATS_HOST(rx_bytes),
1031	GBENU_STATS_HOST(tx_good_frames),
1032	GBENU_STATS_HOST(tx_broadcast_frames),
1033	GBENU_STATS_HOST(tx_multicast_frames),
1034	GBENU_STATS_HOST(tx_bytes),
1035	GBENU_STATS_HOST(tx_64B_frames),
1036	GBENU_STATS_HOST(tx_65_to_127B_frames),
1037	GBENU_STATS_HOST(tx_128_to_255B_frames),
1038	GBENU_STATS_HOST(tx_256_to_511B_frames),
1039	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1040	GBENU_STATS_HOST(tx_1024B_frames),
1041	GBENU_STATS_HOST(net_bytes),
1042	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1043	GBENU_STATS_HOST(rx_port_mask_drop),
1044	GBENU_STATS_HOST(rx_top_fifo_drop),
1045	GBENU_STATS_HOST(ale_rate_limit_drop),
1046	GBENU_STATS_HOST(ale_vid_ingress_drop),
1047	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1048	GBENU_STATS_HOST(ale_unknown_ucast),
1049	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1050	GBENU_STATS_HOST(ale_unknown_mcast),
1051	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1052	GBENU_STATS_HOST(ale_unknown_bcast),
1053	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1054	GBENU_STATS_HOST(ale_pol_match),
1055	GBENU_STATS_HOST(ale_pol_match_red),
1056	GBENU_STATS_HOST(ale_pol_match_yellow),
1057	GBENU_STATS_HOST(tx_mem_protect_err),
1058	GBENU_STATS_HOST(tx_pri0_drop),
1059	GBENU_STATS_HOST(tx_pri1_drop),
1060	GBENU_STATS_HOST(tx_pri2_drop),
1061	GBENU_STATS_HOST(tx_pri3_drop),
1062	GBENU_STATS_HOST(tx_pri4_drop),
1063	GBENU_STATS_HOST(tx_pri5_drop),
1064	GBENU_STATS_HOST(tx_pri6_drop),
1065	GBENU_STATS_HOST(tx_pri7_drop),
1066	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1067	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1068	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1069	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1070	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1071	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1072	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1073	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1074	/* GBENU Module 1 */
1075	GBENU_STATS_P1(rx_good_frames),
1076	GBENU_STATS_P1(rx_broadcast_frames),
1077	GBENU_STATS_P1(rx_multicast_frames),
1078	GBENU_STATS_P1(rx_pause_frames),
1079	GBENU_STATS_P1(rx_crc_errors),
1080	GBENU_STATS_P1(rx_align_code_errors),
1081	GBENU_STATS_P1(rx_oversized_frames),
1082	GBENU_STATS_P1(rx_jabber_frames),
1083	GBENU_STATS_P1(rx_undersized_frames),
1084	GBENU_STATS_P1(rx_fragments),
1085	GBENU_STATS_P1(ale_drop),
1086	GBENU_STATS_P1(ale_overrun_drop),
1087	GBENU_STATS_P1(rx_bytes),
1088	GBENU_STATS_P1(tx_good_frames),
1089	GBENU_STATS_P1(tx_broadcast_frames),
1090	GBENU_STATS_P1(tx_multicast_frames),
1091	GBENU_STATS_P1(tx_pause_frames),
1092	GBENU_STATS_P1(tx_deferred_frames),
1093	GBENU_STATS_P1(tx_collision_frames),
1094	GBENU_STATS_P1(tx_single_coll_frames),
1095	GBENU_STATS_P1(tx_mult_coll_frames),
1096	GBENU_STATS_P1(tx_excessive_collisions),
1097	GBENU_STATS_P1(tx_late_collisions),
1098	GBENU_STATS_P1(rx_ipg_error),
1099	GBENU_STATS_P1(tx_carrier_sense_errors),
1100	GBENU_STATS_P1(tx_bytes),
1101	GBENU_STATS_P1(tx_64B_frames),
1102	GBENU_STATS_P1(tx_65_to_127B_frames),
1103	GBENU_STATS_P1(tx_128_to_255B_frames),
1104	GBENU_STATS_P1(tx_256_to_511B_frames),
1105	GBENU_STATS_P1(tx_512_to_1023B_frames),
1106	GBENU_STATS_P1(tx_1024B_frames),
1107	GBENU_STATS_P1(net_bytes),
1108	GBENU_STATS_P1(rx_bottom_fifo_drop),
1109	GBENU_STATS_P1(rx_port_mask_drop),
1110	GBENU_STATS_P1(rx_top_fifo_drop),
1111	GBENU_STATS_P1(ale_rate_limit_drop),
1112	GBENU_STATS_P1(ale_vid_ingress_drop),
1113	GBENU_STATS_P1(ale_da_eq_sa_drop),
1114	GBENU_STATS_P1(ale_unknown_ucast),
1115	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1116	GBENU_STATS_P1(ale_unknown_mcast),
1117	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1118	GBENU_STATS_P1(ale_unknown_bcast),
1119	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1120	GBENU_STATS_P1(ale_pol_match),
1121	GBENU_STATS_P1(ale_pol_match_red),
1122	GBENU_STATS_P1(ale_pol_match_yellow),
1123	GBENU_STATS_P1(tx_mem_protect_err),
1124	GBENU_STATS_P1(tx_pri0_drop),
1125	GBENU_STATS_P1(tx_pri1_drop),
1126	GBENU_STATS_P1(tx_pri2_drop),
1127	GBENU_STATS_P1(tx_pri3_drop),
1128	GBENU_STATS_P1(tx_pri4_drop),
1129	GBENU_STATS_P1(tx_pri5_drop),
1130	GBENU_STATS_P1(tx_pri6_drop),
1131	GBENU_STATS_P1(tx_pri7_drop),
1132	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1133	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1134	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1135	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1136	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1137	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1138	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1139	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1140	/* GBENU Module 2 */
1141	GBENU_STATS_P2(rx_good_frames),
1142	GBENU_STATS_P2(rx_broadcast_frames),
1143	GBENU_STATS_P2(rx_multicast_frames),
1144	GBENU_STATS_P2(rx_pause_frames),
1145	GBENU_STATS_P2(rx_crc_errors),
1146	GBENU_STATS_P2(rx_align_code_errors),
1147	GBENU_STATS_P2(rx_oversized_frames),
1148	GBENU_STATS_P2(rx_jabber_frames),
1149	GBENU_STATS_P2(rx_undersized_frames),
1150	GBENU_STATS_P2(rx_fragments),
1151	GBENU_STATS_P2(ale_drop),
1152	GBENU_STATS_P2(ale_overrun_drop),
1153	GBENU_STATS_P2(rx_bytes),
1154	GBENU_STATS_P2(tx_good_frames),
1155	GBENU_STATS_P2(tx_broadcast_frames),
1156	GBENU_STATS_P2(tx_multicast_frames),
1157	GBENU_STATS_P2(tx_pause_frames),
1158	GBENU_STATS_P2(tx_deferred_frames),
1159	GBENU_STATS_P2(tx_collision_frames),
1160	GBENU_STATS_P2(tx_single_coll_frames),
1161	GBENU_STATS_P2(tx_mult_coll_frames),
1162	GBENU_STATS_P2(tx_excessive_collisions),
1163	GBENU_STATS_P2(tx_late_collisions),
1164	GBENU_STATS_P2(rx_ipg_error),
1165	GBENU_STATS_P2(tx_carrier_sense_errors),
1166	GBENU_STATS_P2(tx_bytes),
1167	GBENU_STATS_P2(tx_64B_frames),
1168	GBENU_STATS_P2(tx_65_to_127B_frames),
1169	GBENU_STATS_P2(tx_128_to_255B_frames),
1170	GBENU_STATS_P2(tx_256_to_511B_frames),
1171	GBENU_STATS_P2(tx_512_to_1023B_frames),
1172	GBENU_STATS_P2(tx_1024B_frames),
1173	GBENU_STATS_P2(net_bytes),
1174	GBENU_STATS_P2(rx_bottom_fifo_drop),
1175	GBENU_STATS_P2(rx_port_mask_drop),
1176	GBENU_STATS_P2(rx_top_fifo_drop),
1177	GBENU_STATS_P2(ale_rate_limit_drop),
1178	GBENU_STATS_P2(ale_vid_ingress_drop),
1179	GBENU_STATS_P2(ale_da_eq_sa_drop),
1180	GBENU_STATS_P2(ale_unknown_ucast),
1181	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1182	GBENU_STATS_P2(ale_unknown_mcast),
1183	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1184	GBENU_STATS_P2(ale_unknown_bcast),
1185	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1186	GBENU_STATS_P2(ale_pol_match),
1187	GBENU_STATS_P2(ale_pol_match_red),
1188	GBENU_STATS_P2(ale_pol_match_yellow),
1189	GBENU_STATS_P2(tx_mem_protect_err),
1190	GBENU_STATS_P2(tx_pri0_drop),
1191	GBENU_STATS_P2(tx_pri1_drop),
1192	GBENU_STATS_P2(tx_pri2_drop),
1193	GBENU_STATS_P2(tx_pri3_drop),
1194	GBENU_STATS_P2(tx_pri4_drop),
1195	GBENU_STATS_P2(tx_pri5_drop),
1196	GBENU_STATS_P2(tx_pri6_drop),
1197	GBENU_STATS_P2(tx_pri7_drop),
1198	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1199	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1200	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1201	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1202	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1203	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1204	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1205	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1206	/* GBENU Module 3 */
1207	GBENU_STATS_P3(rx_good_frames),
1208	GBENU_STATS_P3(rx_broadcast_frames),
1209	GBENU_STATS_P3(rx_multicast_frames),
1210	GBENU_STATS_P3(rx_pause_frames),
1211	GBENU_STATS_P3(rx_crc_errors),
1212	GBENU_STATS_P3(rx_align_code_errors),
1213	GBENU_STATS_P3(rx_oversized_frames),
1214	GBENU_STATS_P3(rx_jabber_frames),
1215	GBENU_STATS_P3(rx_undersized_frames),
1216	GBENU_STATS_P3(rx_fragments),
1217	GBENU_STATS_P3(ale_drop),
1218	GBENU_STATS_P3(ale_overrun_drop),
1219	GBENU_STATS_P3(rx_bytes),
1220	GBENU_STATS_P3(tx_good_frames),
1221	GBENU_STATS_P3(tx_broadcast_frames),
1222	GBENU_STATS_P3(tx_multicast_frames),
1223	GBENU_STATS_P3(tx_pause_frames),
1224	GBENU_STATS_P3(tx_deferred_frames),
1225	GBENU_STATS_P3(tx_collision_frames),
1226	GBENU_STATS_P3(tx_single_coll_frames),
1227	GBENU_STATS_P3(tx_mult_coll_frames),
1228	GBENU_STATS_P3(tx_excessive_collisions),
1229	GBENU_STATS_P3(tx_late_collisions),
1230	GBENU_STATS_P3(rx_ipg_error),
1231	GBENU_STATS_P3(tx_carrier_sense_errors),
1232	GBENU_STATS_P3(tx_bytes),
1233	GBENU_STATS_P3(tx_64B_frames),
1234	GBENU_STATS_P3(tx_65_to_127B_frames),
1235	GBENU_STATS_P3(tx_128_to_255B_frames),
1236	GBENU_STATS_P3(tx_256_to_511B_frames),
1237	GBENU_STATS_P3(tx_512_to_1023B_frames),
1238	GBENU_STATS_P3(tx_1024B_frames),
1239	GBENU_STATS_P3(net_bytes),
1240	GBENU_STATS_P3(rx_bottom_fifo_drop),
1241	GBENU_STATS_P3(rx_port_mask_drop),
1242	GBENU_STATS_P3(rx_top_fifo_drop),
1243	GBENU_STATS_P3(ale_rate_limit_drop),
1244	GBENU_STATS_P3(ale_vid_ingress_drop),
1245	GBENU_STATS_P3(ale_da_eq_sa_drop),
1246	GBENU_STATS_P3(ale_unknown_ucast),
1247	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1248	GBENU_STATS_P3(ale_unknown_mcast),
1249	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1250	GBENU_STATS_P3(ale_unknown_bcast),
1251	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1252	GBENU_STATS_P3(ale_pol_match),
1253	GBENU_STATS_P3(ale_pol_match_red),
1254	GBENU_STATS_P3(ale_pol_match_yellow),
1255	GBENU_STATS_P3(tx_mem_protect_err),
1256	GBENU_STATS_P3(tx_pri0_drop),
1257	GBENU_STATS_P3(tx_pri1_drop),
1258	GBENU_STATS_P3(tx_pri2_drop),
1259	GBENU_STATS_P3(tx_pri3_drop),
1260	GBENU_STATS_P3(tx_pri4_drop),
1261	GBENU_STATS_P3(tx_pri5_drop),
1262	GBENU_STATS_P3(tx_pri6_drop),
1263	GBENU_STATS_P3(tx_pri7_drop),
1264	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1265	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1266	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1267	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1268	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1269	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1270	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1271	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1272	/* GBENU Module 4 */
1273	GBENU_STATS_P4(rx_good_frames),
1274	GBENU_STATS_P4(rx_broadcast_frames),
1275	GBENU_STATS_P4(rx_multicast_frames),
1276	GBENU_STATS_P4(rx_pause_frames),
1277	GBENU_STATS_P4(rx_crc_errors),
1278	GBENU_STATS_P4(rx_align_code_errors),
1279	GBENU_STATS_P4(rx_oversized_frames),
1280	GBENU_STATS_P4(rx_jabber_frames),
1281	GBENU_STATS_P4(rx_undersized_frames),
1282	GBENU_STATS_P4(rx_fragments),
1283	GBENU_STATS_P4(ale_drop),
1284	GBENU_STATS_P4(ale_overrun_drop),
1285	GBENU_STATS_P4(rx_bytes),
1286	GBENU_STATS_P4(tx_good_frames),
1287	GBENU_STATS_P4(tx_broadcast_frames),
1288	GBENU_STATS_P4(tx_multicast_frames),
1289	GBENU_STATS_P4(tx_pause_frames),
1290	GBENU_STATS_P4(tx_deferred_frames),
1291	GBENU_STATS_P4(tx_collision_frames),
1292	GBENU_STATS_P4(tx_single_coll_frames),
1293	GBENU_STATS_P4(tx_mult_coll_frames),
1294	GBENU_STATS_P4(tx_excessive_collisions),
1295	GBENU_STATS_P4(tx_late_collisions),
1296	GBENU_STATS_P4(rx_ipg_error),
1297	GBENU_STATS_P4(tx_carrier_sense_errors),
1298	GBENU_STATS_P4(tx_bytes),
1299	GBENU_STATS_P4(tx_64B_frames),
1300	GBENU_STATS_P4(tx_65_to_127B_frames),
1301	GBENU_STATS_P4(tx_128_to_255B_frames),
1302	GBENU_STATS_P4(tx_256_to_511B_frames),
1303	GBENU_STATS_P4(tx_512_to_1023B_frames),
1304	GBENU_STATS_P4(tx_1024B_frames),
1305	GBENU_STATS_P4(net_bytes),
1306	GBENU_STATS_P4(rx_bottom_fifo_drop),
1307	GBENU_STATS_P4(rx_port_mask_drop),
1308	GBENU_STATS_P4(rx_top_fifo_drop),
1309	GBENU_STATS_P4(ale_rate_limit_drop),
1310	GBENU_STATS_P4(ale_vid_ingress_drop),
1311	GBENU_STATS_P4(ale_da_eq_sa_drop),
1312	GBENU_STATS_P4(ale_unknown_ucast),
1313	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1314	GBENU_STATS_P4(ale_unknown_mcast),
1315	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1316	GBENU_STATS_P4(ale_unknown_bcast),
1317	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1318	GBENU_STATS_P4(ale_pol_match),
1319	GBENU_STATS_P4(ale_pol_match_red),
1320	GBENU_STATS_P4(ale_pol_match_yellow),
1321	GBENU_STATS_P4(tx_mem_protect_err),
1322	GBENU_STATS_P4(tx_pri0_drop),
1323	GBENU_STATS_P4(tx_pri1_drop),
1324	GBENU_STATS_P4(tx_pri2_drop),
1325	GBENU_STATS_P4(tx_pri3_drop),
1326	GBENU_STATS_P4(tx_pri4_drop),
1327	GBENU_STATS_P4(tx_pri5_drop),
1328	GBENU_STATS_P4(tx_pri6_drop),
1329	GBENU_STATS_P4(tx_pri7_drop),
1330	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1331	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1332	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1333	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1334	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1335	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1336	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1337	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1338	/* GBENU Module 5 */
1339	GBENU_STATS_P5(rx_good_frames),
1340	GBENU_STATS_P5(rx_broadcast_frames),
1341	GBENU_STATS_P5(rx_multicast_frames),
1342	GBENU_STATS_P5(rx_pause_frames),
1343	GBENU_STATS_P5(rx_crc_errors),
1344	GBENU_STATS_P5(rx_align_code_errors),
1345	GBENU_STATS_P5(rx_oversized_frames),
1346	GBENU_STATS_P5(rx_jabber_frames),
1347	GBENU_STATS_P5(rx_undersized_frames),
1348	GBENU_STATS_P5(rx_fragments),
1349	GBENU_STATS_P5(ale_drop),
1350	GBENU_STATS_P5(ale_overrun_drop),
1351	GBENU_STATS_P5(rx_bytes),
1352	GBENU_STATS_P5(tx_good_frames),
1353	GBENU_STATS_P5(tx_broadcast_frames),
1354	GBENU_STATS_P5(tx_multicast_frames),
1355	GBENU_STATS_P5(tx_pause_frames),
1356	GBENU_STATS_P5(tx_deferred_frames),
1357	GBENU_STATS_P5(tx_collision_frames),
1358	GBENU_STATS_P5(tx_single_coll_frames),
1359	GBENU_STATS_P5(tx_mult_coll_frames),
1360	GBENU_STATS_P5(tx_excessive_collisions),
1361	GBENU_STATS_P5(tx_late_collisions),
1362	GBENU_STATS_P5(rx_ipg_error),
1363	GBENU_STATS_P5(tx_carrier_sense_errors),
1364	GBENU_STATS_P5(tx_bytes),
1365	GBENU_STATS_P5(tx_64B_frames),
1366	GBENU_STATS_P5(tx_65_to_127B_frames),
1367	GBENU_STATS_P5(tx_128_to_255B_frames),
1368	GBENU_STATS_P5(tx_256_to_511B_frames),
1369	GBENU_STATS_P5(tx_512_to_1023B_frames),
1370	GBENU_STATS_P5(tx_1024B_frames),
1371	GBENU_STATS_P5(net_bytes),
1372	GBENU_STATS_P5(rx_bottom_fifo_drop),
1373	GBENU_STATS_P5(rx_port_mask_drop),
1374	GBENU_STATS_P5(rx_top_fifo_drop),
1375	GBENU_STATS_P5(ale_rate_limit_drop),
1376	GBENU_STATS_P5(ale_vid_ingress_drop),
1377	GBENU_STATS_P5(ale_da_eq_sa_drop),
1378	GBENU_STATS_P5(ale_unknown_ucast),
1379	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1380	GBENU_STATS_P5(ale_unknown_mcast),
1381	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1382	GBENU_STATS_P5(ale_unknown_bcast),
1383	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1384	GBENU_STATS_P5(ale_pol_match),
1385	GBENU_STATS_P5(ale_pol_match_red),
1386	GBENU_STATS_P5(ale_pol_match_yellow),
1387	GBENU_STATS_P5(tx_mem_protect_err),
1388	GBENU_STATS_P5(tx_pri0_drop),
1389	GBENU_STATS_P5(tx_pri1_drop),
1390	GBENU_STATS_P5(tx_pri2_drop),
1391	GBENU_STATS_P5(tx_pri3_drop),
1392	GBENU_STATS_P5(tx_pri4_drop),
1393	GBENU_STATS_P5(tx_pri5_drop),
1394	GBENU_STATS_P5(tx_pri6_drop),
1395	GBENU_STATS_P5(tx_pri7_drop),
1396	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1397	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1398	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1399	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1400	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1401	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1402	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1403	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1404	/* GBENU Module 6 */
1405	GBENU_STATS_P6(rx_good_frames),
1406	GBENU_STATS_P6(rx_broadcast_frames),
1407	GBENU_STATS_P6(rx_multicast_frames),
1408	GBENU_STATS_P6(rx_pause_frames),
1409	GBENU_STATS_P6(rx_crc_errors),
1410	GBENU_STATS_P6(rx_align_code_errors),
1411	GBENU_STATS_P6(rx_oversized_frames),
1412	GBENU_STATS_P6(rx_jabber_frames),
1413	GBENU_STATS_P6(rx_undersized_frames),
1414	GBENU_STATS_P6(rx_fragments),
1415	GBENU_STATS_P6(ale_drop),
1416	GBENU_STATS_P6(ale_overrun_drop),
1417	GBENU_STATS_P6(rx_bytes),
1418	GBENU_STATS_P6(tx_good_frames),
1419	GBENU_STATS_P6(tx_broadcast_frames),
1420	GBENU_STATS_P6(tx_multicast_frames),
1421	GBENU_STATS_P6(tx_pause_frames),
1422	GBENU_STATS_P6(tx_deferred_frames),
1423	GBENU_STATS_P6(tx_collision_frames),
1424	GBENU_STATS_P6(tx_single_coll_frames),
1425	GBENU_STATS_P6(tx_mult_coll_frames),
1426	GBENU_STATS_P6(tx_excessive_collisions),
1427	GBENU_STATS_P6(tx_late_collisions),
1428	GBENU_STATS_P6(rx_ipg_error),
1429	GBENU_STATS_P6(tx_carrier_sense_errors),
1430	GBENU_STATS_P6(tx_bytes),
1431	GBENU_STATS_P6(tx_64B_frames),
1432	GBENU_STATS_P6(tx_65_to_127B_frames),
1433	GBENU_STATS_P6(tx_128_to_255B_frames),
1434	GBENU_STATS_P6(tx_256_to_511B_frames),
1435	GBENU_STATS_P6(tx_512_to_1023B_frames),
1436	GBENU_STATS_P6(tx_1024B_frames),
1437	GBENU_STATS_P6(net_bytes),
1438	GBENU_STATS_P6(rx_bottom_fifo_drop),
1439	GBENU_STATS_P6(rx_port_mask_drop),
1440	GBENU_STATS_P6(rx_top_fifo_drop),
1441	GBENU_STATS_P6(ale_rate_limit_drop),
1442	GBENU_STATS_P6(ale_vid_ingress_drop),
1443	GBENU_STATS_P6(ale_da_eq_sa_drop),
1444	GBENU_STATS_P6(ale_unknown_ucast),
1445	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1446	GBENU_STATS_P6(ale_unknown_mcast),
1447	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1448	GBENU_STATS_P6(ale_unknown_bcast),
1449	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1450	GBENU_STATS_P6(ale_pol_match),
1451	GBENU_STATS_P6(ale_pol_match_red),
1452	GBENU_STATS_P6(ale_pol_match_yellow),
1453	GBENU_STATS_P6(tx_mem_protect_err),
1454	GBENU_STATS_P6(tx_pri0_drop),
1455	GBENU_STATS_P6(tx_pri1_drop),
1456	GBENU_STATS_P6(tx_pri2_drop),
1457	GBENU_STATS_P6(tx_pri3_drop),
1458	GBENU_STATS_P6(tx_pri4_drop),
1459	GBENU_STATS_P6(tx_pri5_drop),
1460	GBENU_STATS_P6(tx_pri6_drop),
1461	GBENU_STATS_P6(tx_pri7_drop),
1462	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1463	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1464	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1465	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1466	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1467	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1468	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1469	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1470	/* GBENU Module 7 */
1471	GBENU_STATS_P7(rx_good_frames),
1472	GBENU_STATS_P7(rx_broadcast_frames),
1473	GBENU_STATS_P7(rx_multicast_frames),
1474	GBENU_STATS_P7(rx_pause_frames),
1475	GBENU_STATS_P7(rx_crc_errors),
1476	GBENU_STATS_P7(rx_align_code_errors),
1477	GBENU_STATS_P7(rx_oversized_frames),
1478	GBENU_STATS_P7(rx_jabber_frames),
1479	GBENU_STATS_P7(rx_undersized_frames),
1480	GBENU_STATS_P7(rx_fragments),
1481	GBENU_STATS_P7(ale_drop),
1482	GBENU_STATS_P7(ale_overrun_drop),
1483	GBENU_STATS_P7(rx_bytes),
1484	GBENU_STATS_P7(tx_good_frames),
1485	GBENU_STATS_P7(tx_broadcast_frames),
1486	GBENU_STATS_P7(tx_multicast_frames),
1487	GBENU_STATS_P7(tx_pause_frames),
1488	GBENU_STATS_P7(tx_deferred_frames),
1489	GBENU_STATS_P7(tx_collision_frames),
1490	GBENU_STATS_P7(tx_single_coll_frames),
1491	GBENU_STATS_P7(tx_mult_coll_frames),
1492	GBENU_STATS_P7(tx_excessive_collisions),
1493	GBENU_STATS_P7(tx_late_collisions),
1494	GBENU_STATS_P7(rx_ipg_error),
1495	GBENU_STATS_P7(tx_carrier_sense_errors),
1496	GBENU_STATS_P7(tx_bytes),
1497	GBENU_STATS_P7(tx_64B_frames),
1498	GBENU_STATS_P7(tx_65_to_127B_frames),
1499	GBENU_STATS_P7(tx_128_to_255B_frames),
1500	GBENU_STATS_P7(tx_256_to_511B_frames),
1501	GBENU_STATS_P7(tx_512_to_1023B_frames),
1502	GBENU_STATS_P7(tx_1024B_frames),
1503	GBENU_STATS_P7(net_bytes),
1504	GBENU_STATS_P7(rx_bottom_fifo_drop),
1505	GBENU_STATS_P7(rx_port_mask_drop),
1506	GBENU_STATS_P7(rx_top_fifo_drop),
1507	GBENU_STATS_P7(ale_rate_limit_drop),
1508	GBENU_STATS_P7(ale_vid_ingress_drop),
1509	GBENU_STATS_P7(ale_da_eq_sa_drop),
1510	GBENU_STATS_P7(ale_unknown_ucast),
1511	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1512	GBENU_STATS_P7(ale_unknown_mcast),
1513	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1514	GBENU_STATS_P7(ale_unknown_bcast),
1515	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1516	GBENU_STATS_P7(ale_pol_match),
1517	GBENU_STATS_P7(ale_pol_match_red),
1518	GBENU_STATS_P7(ale_pol_match_yellow),
1519	GBENU_STATS_P7(tx_mem_protect_err),
1520	GBENU_STATS_P7(tx_pri0_drop),
1521	GBENU_STATS_P7(tx_pri1_drop),
1522	GBENU_STATS_P7(tx_pri2_drop),
1523	GBENU_STATS_P7(tx_pri3_drop),
1524	GBENU_STATS_P7(tx_pri4_drop),
1525	GBENU_STATS_P7(tx_pri5_drop),
1526	GBENU_STATS_P7(tx_pri6_drop),
1527	GBENU_STATS_P7(tx_pri7_drop),
1528	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1529	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1530	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1531	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1532	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1533	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1534	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1535	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1536	/* GBENU Module 8 */
1537	GBENU_STATS_P8(rx_good_frames),
1538	GBENU_STATS_P8(rx_broadcast_frames),
1539	GBENU_STATS_P8(rx_multicast_frames),
1540	GBENU_STATS_P8(rx_pause_frames),
1541	GBENU_STATS_P8(rx_crc_errors),
1542	GBENU_STATS_P8(rx_align_code_errors),
1543	GBENU_STATS_P8(rx_oversized_frames),
1544	GBENU_STATS_P8(rx_jabber_frames),
1545	GBENU_STATS_P8(rx_undersized_frames),
1546	GBENU_STATS_P8(rx_fragments),
1547	GBENU_STATS_P8(ale_drop),
1548	GBENU_STATS_P8(ale_overrun_drop),
1549	GBENU_STATS_P8(rx_bytes),
1550	GBENU_STATS_P8(tx_good_frames),
1551	GBENU_STATS_P8(tx_broadcast_frames),
1552	GBENU_STATS_P8(tx_multicast_frames),
1553	GBENU_STATS_P8(tx_pause_frames),
1554	GBENU_STATS_P8(tx_deferred_frames),
1555	GBENU_STATS_P8(tx_collision_frames),
1556	GBENU_STATS_P8(tx_single_coll_frames),
1557	GBENU_STATS_P8(tx_mult_coll_frames),
1558	GBENU_STATS_P8(tx_excessive_collisions),
1559	GBENU_STATS_P8(tx_late_collisions),
1560	GBENU_STATS_P8(rx_ipg_error),
1561	GBENU_STATS_P8(tx_carrier_sense_errors),
1562	GBENU_STATS_P8(tx_bytes),
1563	GBENU_STATS_P8(tx_64B_frames),
1564	GBENU_STATS_P8(tx_65_to_127B_frames),
1565	GBENU_STATS_P8(tx_128_to_255B_frames),
1566	GBENU_STATS_P8(tx_256_to_511B_frames),
1567	GBENU_STATS_P8(tx_512_to_1023B_frames),
1568	GBENU_STATS_P8(tx_1024B_frames),
1569	GBENU_STATS_P8(net_bytes),
1570	GBENU_STATS_P8(rx_bottom_fifo_drop),
1571	GBENU_STATS_P8(rx_port_mask_drop),
1572	GBENU_STATS_P8(rx_top_fifo_drop),
1573	GBENU_STATS_P8(ale_rate_limit_drop),
1574	GBENU_STATS_P8(ale_vid_ingress_drop),
1575	GBENU_STATS_P8(ale_da_eq_sa_drop),
1576	GBENU_STATS_P8(ale_unknown_ucast),
1577	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1578	GBENU_STATS_P8(ale_unknown_mcast),
1579	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1580	GBENU_STATS_P8(ale_unknown_bcast),
1581	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1582	GBENU_STATS_P8(ale_pol_match),
1583	GBENU_STATS_P8(ale_pol_match_red),
1584	GBENU_STATS_P8(ale_pol_match_yellow),
1585	GBENU_STATS_P8(tx_mem_protect_err),
1586	GBENU_STATS_P8(tx_pri0_drop),
1587	GBENU_STATS_P8(tx_pri1_drop),
1588	GBENU_STATS_P8(tx_pri2_drop),
1589	GBENU_STATS_P8(tx_pri3_drop),
1590	GBENU_STATS_P8(tx_pri4_drop),
1591	GBENU_STATS_P8(tx_pri5_drop),
1592	GBENU_STATS_P8(tx_pri6_drop),
1593	GBENU_STATS_P8(tx_pri7_drop),
1594	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1595	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1596	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1597	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1598	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1599	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1600	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1601	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1602};
1603
1604#define XGBE_STATS0_INFO(field)				\
1605{							\
1606	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1607	sizeof_field(struct xgbe_hw_stats, field),	\
1608	offsetof(struct xgbe_hw_stats, field)		\
1609}
1610
1611#define XGBE_STATS1_INFO(field)				\
1612{							\
1613	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1614	sizeof_field(struct xgbe_hw_stats, field),	\
1615	offsetof(struct xgbe_hw_stats, field)		\
1616}
1617
1618#define XGBE_STATS2_INFO(field)				\
1619{							\
1620	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1621	sizeof_field(struct xgbe_hw_stats, field),	\
1622	offsetof(struct xgbe_hw_stats, field)		\
1623}
1624
1625static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1626	/* GBE module 0 */
1627	XGBE_STATS0_INFO(rx_good_frames),
1628	XGBE_STATS0_INFO(rx_broadcast_frames),
1629	XGBE_STATS0_INFO(rx_multicast_frames),
1630	XGBE_STATS0_INFO(rx_oversized_frames),
1631	XGBE_STATS0_INFO(rx_undersized_frames),
1632	XGBE_STATS0_INFO(overrun_type4),
1633	XGBE_STATS0_INFO(overrun_type5),
1634	XGBE_STATS0_INFO(rx_bytes),
1635	XGBE_STATS0_INFO(tx_good_frames),
1636	XGBE_STATS0_INFO(tx_broadcast_frames),
1637	XGBE_STATS0_INFO(tx_multicast_frames),
1638	XGBE_STATS0_INFO(tx_bytes),
1639	XGBE_STATS0_INFO(tx_64byte_frames),
1640	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1641	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1642	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1643	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1644	XGBE_STATS0_INFO(tx_1024byte_frames),
1645	XGBE_STATS0_INFO(net_bytes),
1646	XGBE_STATS0_INFO(rx_sof_overruns),
1647	XGBE_STATS0_INFO(rx_mof_overruns),
1648	XGBE_STATS0_INFO(rx_dma_overruns),
1649	/* XGBE module 1 */
1650	XGBE_STATS1_INFO(rx_good_frames),
1651	XGBE_STATS1_INFO(rx_broadcast_frames),
1652	XGBE_STATS1_INFO(rx_multicast_frames),
1653	XGBE_STATS1_INFO(rx_pause_frames),
1654	XGBE_STATS1_INFO(rx_crc_errors),
1655	XGBE_STATS1_INFO(rx_align_code_errors),
1656	XGBE_STATS1_INFO(rx_oversized_frames),
1657	XGBE_STATS1_INFO(rx_jabber_frames),
1658	XGBE_STATS1_INFO(rx_undersized_frames),
1659	XGBE_STATS1_INFO(rx_fragments),
1660	XGBE_STATS1_INFO(overrun_type4),
1661	XGBE_STATS1_INFO(overrun_type5),
1662	XGBE_STATS1_INFO(rx_bytes),
1663	XGBE_STATS1_INFO(tx_good_frames),
1664	XGBE_STATS1_INFO(tx_broadcast_frames),
1665	XGBE_STATS1_INFO(tx_multicast_frames),
1666	XGBE_STATS1_INFO(tx_pause_frames),
1667	XGBE_STATS1_INFO(tx_deferred_frames),
1668	XGBE_STATS1_INFO(tx_collision_frames),
1669	XGBE_STATS1_INFO(tx_single_coll_frames),
1670	XGBE_STATS1_INFO(tx_mult_coll_frames),
1671	XGBE_STATS1_INFO(tx_excessive_collisions),
1672	XGBE_STATS1_INFO(tx_late_collisions),
1673	XGBE_STATS1_INFO(tx_underrun),
1674	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1675	XGBE_STATS1_INFO(tx_bytes),
1676	XGBE_STATS1_INFO(tx_64byte_frames),
1677	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1678	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1679	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1680	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1681	XGBE_STATS1_INFO(tx_1024byte_frames),
1682	XGBE_STATS1_INFO(net_bytes),
1683	XGBE_STATS1_INFO(rx_sof_overruns),
1684	XGBE_STATS1_INFO(rx_mof_overruns),
1685	XGBE_STATS1_INFO(rx_dma_overruns),
1686	/* XGBE module 2 */
1687	XGBE_STATS2_INFO(rx_good_frames),
1688	XGBE_STATS2_INFO(rx_broadcast_frames),
1689	XGBE_STATS2_INFO(rx_multicast_frames),
1690	XGBE_STATS2_INFO(rx_pause_frames),
1691	XGBE_STATS2_INFO(rx_crc_errors),
1692	XGBE_STATS2_INFO(rx_align_code_errors),
1693	XGBE_STATS2_INFO(rx_oversized_frames),
1694	XGBE_STATS2_INFO(rx_jabber_frames),
1695	XGBE_STATS2_INFO(rx_undersized_frames),
1696	XGBE_STATS2_INFO(rx_fragments),
1697	XGBE_STATS2_INFO(overrun_type4),
1698	XGBE_STATS2_INFO(overrun_type5),
1699	XGBE_STATS2_INFO(rx_bytes),
1700	XGBE_STATS2_INFO(tx_good_frames),
1701	XGBE_STATS2_INFO(tx_broadcast_frames),
1702	XGBE_STATS2_INFO(tx_multicast_frames),
1703	XGBE_STATS2_INFO(tx_pause_frames),
1704	XGBE_STATS2_INFO(tx_deferred_frames),
1705	XGBE_STATS2_INFO(tx_collision_frames),
1706	XGBE_STATS2_INFO(tx_single_coll_frames),
1707	XGBE_STATS2_INFO(tx_mult_coll_frames),
1708	XGBE_STATS2_INFO(tx_excessive_collisions),
1709	XGBE_STATS2_INFO(tx_late_collisions),
1710	XGBE_STATS2_INFO(tx_underrun),
1711	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1712	XGBE_STATS2_INFO(tx_bytes),
1713	XGBE_STATS2_INFO(tx_64byte_frames),
1714	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1715	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1716	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1717	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1718	XGBE_STATS2_INFO(tx_1024byte_frames),
1719	XGBE_STATS2_INFO(net_bytes),
1720	XGBE_STATS2_INFO(rx_sof_overruns),
1721	XGBE_STATS2_INFO(rx_mof_overruns),
1722	XGBE_STATS2_INFO(rx_dma_overruns),
1723};
1724
1725#define for_each_intf(i, priv) \
1726	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1727
1728#define for_each_sec_slave(slave, priv) \
1729	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1730
1731#define first_sec_slave(priv)					\
1732	list_first_entry(&priv->secondary_slaves, \
1733			struct gbe_slave, slave_list)
1734
1735static void keystone_get_drvinfo(struct net_device *ndev,
1736				 struct ethtool_drvinfo *info)
1737{
1738	strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1739	strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1740}
1741
1742static u32 keystone_get_msglevel(struct net_device *ndev)
1743{
1744	struct netcp_intf *netcp = netdev_priv(ndev);
1745
1746	return netcp->msg_enable;
1747}
1748
1749static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1750{
1751	struct netcp_intf *netcp = netdev_priv(ndev);
1752
1753	netcp->msg_enable = value;
1754}
1755
1756static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1757{
1758	struct gbe_intf *gbe_intf;
1759
1760	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1761	if (!gbe_intf)
1762		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1763
1764	return gbe_intf;
1765}
1766
1767static void keystone_get_stat_strings(struct net_device *ndev,
1768				      uint32_t stringset, uint8_t *data)
1769{
1770	struct netcp_intf *netcp = netdev_priv(ndev);
1771	struct gbe_intf *gbe_intf;
1772	struct gbe_priv *gbe_dev;
1773	int i;
1774
1775	gbe_intf = keystone_get_intf_data(netcp);
1776	if (!gbe_intf)
1777		return;
1778	gbe_dev = gbe_intf->gbe_dev;
1779
1780	switch (stringset) {
1781	case ETH_SS_STATS:
1782		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1783			memcpy(data, gbe_dev->et_stats[i].desc,
1784			       ETH_GSTRING_LEN);
1785			data += ETH_GSTRING_LEN;
1786		}
1787		break;
1788	case ETH_SS_TEST:
1789		break;
1790	}
1791}
1792
1793static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1794{
1795	struct netcp_intf *netcp = netdev_priv(ndev);
1796	struct gbe_intf *gbe_intf;
1797	struct gbe_priv *gbe_dev;
1798
1799	gbe_intf = keystone_get_intf_data(netcp);
1800	if (!gbe_intf)
1801		return -EINVAL;
1802	gbe_dev = gbe_intf->gbe_dev;
1803
1804	switch (stringset) {
1805	case ETH_SS_TEST:
1806		return 0;
1807	case ETH_SS_STATS:
1808		return gbe_dev->num_et_stats;
1809	default:
1810		return -EINVAL;
1811	}
1812}
1813
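/* Reset the software view of one stats module: zero the accumulated 64-bit
 * counters and re-baseline hw_stats_prev from the current hardware values so
 * that later delta updates start from "now".
 */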
1814static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1815{
1816	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1817	u32  __iomem *p_stats_entry;
1818	int i;
1819
1820	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1821		if (gbe_dev->et_stats[i].type == stats_mod) {
1822			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1823			gbe_dev->hw_stats[i] = 0;
1824			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1825		}
1826	}
1827}
1828
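/* Fold the current 32-bit hardware counter into the 64-bit software
 * accumulator.  The unsigned 32-bit subtraction against hw_stats_prev keeps
 * the delta correct across a single counter wrap between updates.
 */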
1829static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1830					     int et_stats_entry)
1831{
1832	void __iomem *base = NULL;
1833	u32  __iomem *p_stats_entry;
1834	u32 curr, delta;
1835
1836	/* The hw_stats_regs pointers are already
1837	 * properly set to point to the right base:
1838	 */
1839	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1840	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1841	curr = readl(p_stats_entry);
1842	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1843	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1844	gbe_dev->hw_stats[et_stats_entry] += delta;
1845}
1846
1847static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1848{
1849	int i;
1850
1851	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1852		gbe_update_hw_stats_entry(gbe_dev, i);
1853
1854		if (data)
1855			data[i] = gbe_dev->hw_stats[i];
1856	}
1857}
1858
1859static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1860					       int stats_mod)
1861{
1862	u32 val;
1863
1864	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1865
1866	switch (stats_mod) {
1867	case GBE_STATSA_MODULE:
1868	case GBE_STATSB_MODULE:
1869		val &= ~GBE_STATS_CD_SEL;
1870		break;
1871	case GBE_STATSC_MODULE:
1872	case GBE_STATSD_MODULE:
1873		val |= GBE_STATS_CD_SEL;
1874		break;
1875	default:
1876		return;
1877	}
1878
1879	/* make the stat module visible */
1880	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1881}
1882
1883static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1884{
1885	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1886	gbe_reset_mod_stats(gbe_dev, stats_mod);
1887}
1888
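/* On ver 1.4 (K2HK) hardware only half of the stats modules are visible at a
 * time, so update the two halves of the et_stats table back to back,
 * switching module visibility in between (see gbe_stats_mod_visible_ver14()).
 */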
1889static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1890{
1891	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1892	int et_entry, j, pair;
1893
1894	for (pair = 0; pair < 2; pair++) {
1895		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1896						      GBE_STATSC_MODULE :
1897						      GBE_STATSA_MODULE));
1898
1899		for (j = 0; j < half_num_et_stats; j++) {
1900			et_entry = pair * half_num_et_stats + j;
1901			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1902
1903			if (data)
1904				data[et_entry] = gbe_dev->hw_stats[et_entry];
1905		}
1906	}
1907}
1908
1909static void keystone_get_ethtool_stats(struct net_device *ndev,
1910				       struct ethtool_stats *stats,
1911				       uint64_t *data)
1912{
1913	struct netcp_intf *netcp = netdev_priv(ndev);
1914	struct gbe_intf *gbe_intf;
1915	struct gbe_priv *gbe_dev;
1916
1917	gbe_intf = keystone_get_intf_data(netcp);
1918	if (!gbe_intf)
1919		return;
1920
1921	gbe_dev = gbe_intf->gbe_dev;
1922	spin_lock_bh(&gbe_dev->hw_stats_lock);
1923	if (IS_SS_ID_VER_14(gbe_dev))
1924		gbe_update_stats_ver14(gbe_dev, data);
1925	else
1926		gbe_update_stats(gbe_dev, data);
1927	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1928}
1929
1930static int keystone_get_link_ksettings(struct net_device *ndev,
1931				       struct ethtool_link_ksettings *cmd)
1932{
1933	struct netcp_intf *netcp = netdev_priv(ndev);
1934	struct phy_device *phy = ndev->phydev;
1935	struct gbe_intf *gbe_intf;
1936
1937	if (!phy)
1938		return -EINVAL;
1939
1940	gbe_intf = keystone_get_intf_data(netcp);
1941	if (!gbe_intf)
1942		return -EINVAL;
1943
1944	if (!gbe_intf->slave)
1945		return -EINVAL;
1946
1947	phy_ethtool_ksettings_get(phy, cmd);
1948	cmd->base.port = gbe_intf->slave->phy_port_t;
1949
1950	return 0;
1951}
1952
1953static int keystone_set_link_ksettings(struct net_device *ndev,
1954				       const struct ethtool_link_ksettings *cmd)
1955{
1956	struct netcp_intf *netcp = netdev_priv(ndev);
1957	struct phy_device *phy = ndev->phydev;
1958	struct gbe_intf *gbe_intf;
1959	u8 port = cmd->base.port;
1960	u32 advertising, supported;
1961	u32 features;
1962
1963	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1964						cmd->link_modes.advertising);
1965	ethtool_convert_link_mode_to_legacy_u32(&supported,
1966						cmd->link_modes.supported);
1967	features = advertising & supported;
1968
1969	if (!phy)
1970		return -EINVAL;
1971
1972	gbe_intf = keystone_get_intf_data(netcp);
1973	if (!gbe_intf)
1974		return -EINVAL;
1975
1976	if (!gbe_intf->slave)
1977		return -EINVAL;
1978
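	/* Allow switching the reported port type only if the PHY
	 * advertises that medium.
	 */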
1979	if (port != gbe_intf->slave->phy_port_t) {
1980		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1981			return -EINVAL;
1982
1983		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1984			return -EINVAL;
1985
1986		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1987			return -EINVAL;
1988
1989		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1990			return -EINVAL;
1991
1992		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1993			return -EINVAL;
1994	}
1995
1996	gbe_intf->slave->phy_port_t = port;
1997	return phy_ethtool_ksettings_set(phy, cmd);
1998}
1999
2000#if IS_ENABLED(CONFIG_TI_CPTS)
2001static int keystone_get_ts_info(struct net_device *ndev,
2002				struct kernel_ethtool_ts_info *info)
2003{
2004	struct netcp_intf *netcp = netdev_priv(ndev);
2005	struct gbe_intf *gbe_intf;
2006
2007	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2008	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2009		return -EINVAL;
2010
2011	info->so_timestamping =
2012		SOF_TIMESTAMPING_TX_HARDWARE |
2013		SOF_TIMESTAMPING_TX_SOFTWARE |
2014		SOF_TIMESTAMPING_RX_HARDWARE |
2015		SOF_TIMESTAMPING_RAW_HARDWARE;
2016	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2017	info->tx_types =
2018		(1 << HWTSTAMP_TX_OFF) |
2019		(1 << HWTSTAMP_TX_ON);
2020	info->rx_filters =
2021		(1 << HWTSTAMP_FILTER_NONE) |
2022		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2023		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2024	return 0;
2025}
2026#else
2027static int keystone_get_ts_info(struct net_device *ndev,
2028				struct kernel_ethtool_ts_info *info)
2029{
2030	info->so_timestamping =
2031		SOF_TIMESTAMPING_TX_SOFTWARE;
2032	info->tx_types = 0;
2033	info->rx_filters = 0;
2034	return 0;
2035}
2036#endif /* CONFIG_TI_CPTS */
2037
2038static const struct ethtool_ops keystone_ethtool_ops = {
2039	.get_drvinfo		= keystone_get_drvinfo,
2040	.get_link		= ethtool_op_get_link,
2041	.get_msglevel		= keystone_get_msglevel,
2042	.set_msglevel		= keystone_set_msglevel,
2043	.get_strings		= keystone_get_stat_strings,
2044	.get_sset_count		= keystone_get_sset_count,
2045	.get_ethtool_stats	= keystone_get_ethtool_stats,
2046	.get_link_ksettings	= keystone_get_link_ksettings,
2047	.set_link_ksettings	= keystone_set_link_ksettings,
2048	.get_ts_info		= keystone_get_ts_info,
2049};
2050
2051static void gbe_set_slave_mac(struct gbe_slave *slave,
2052			      struct gbe_intf *gbe_intf)
2053{
2054	struct net_device *ndev = gbe_intf->ndev;
2055
2056	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2057	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2058}
2059
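/* Map a slave index to its switch port number; when the host occupies
 * port 0, the slave ports are numbered from 1.
 */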
2060static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2061{
2062	if (priv->host_port == 0)
2063		return slave_num + 1;
2064
2065	return slave_num;
2066}
2067
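/* Apply a link up/down transition to a slave port: program mac_control for
 * the negotiated speed (GIG vs XGIG), set the ALE port state to forward or
 * disable, and update the carrier for link interfaces that are not of the
 * MAC-PHY type.
 */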
2068static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2069					  struct net_device *ndev,
2070					  struct gbe_slave *slave,
2071					  int up)
2072{
2073	struct phy_device *phy = slave->phy;
2074	u32 mac_control = 0;
2075
2076	if (up) {
2077		mac_control = slave->mac_control;
2078		if (phy && (phy->speed == SPEED_1000)) {
2079			mac_control |= MACSL_GIG_MODE;
2080			mac_control &= ~MACSL_XGIG_MODE;
2081		} else if (phy && (phy->speed == SPEED_10000)) {
2082			mac_control |= MACSL_XGIG_MODE;
2083			mac_control &= ~MACSL_GIG_MODE;
2084		}
2085
2086		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2087						 mac_control));
2088
2089		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2090				     ALE_PORT_STATE,
2091				     ALE_PORT_STATE_FORWARD);
2092
2093		if (ndev && slave->open &&
2094		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2095		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2096		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2097			netif_carrier_on(ndev);
2098	} else {
2099		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2100						 mac_control));
2101		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2102				     ALE_PORT_STATE,
2103				     ALE_PORT_STATE_DISABLE);
2104		if (ndev &&
2105		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2106		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2107		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2108			netif_carrier_off(ndev);
2109	}
2110
2111	if (phy)
2112		phy_print_status(phy);
2113}
2114
2115static bool gbe_phy_link_status(struct gbe_slave *slave)
2116{
2117	return !slave->phy || slave->phy->link;
2118}
2119
2120#define RGMII_REG_STATUS_LINK	BIT(0)
2121
2122static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2123{
2124	u32 val = 0;
2125
2126	val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2127	*status = !!(val & RGMII_REG_STATUS_LINK);
2128}
2129
2130static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2131					  struct gbe_slave *slave,
2132					  struct net_device *ndev)
2133{
2134	bool sw_link_state = true, phy_link_state;
2135	int sp = slave->slave_num, link_state;
2136
2137	if (!slave->open)
2138		return;
2139
2140	if (SLAVE_LINK_IS_RGMII(slave))
2141		netcp_2u_rgmii_get_port_link(gbe_dev,
2142					     &sw_link_state);
2143	if (SLAVE_LINK_IS_SGMII(slave))
2144		sw_link_state =
2145		netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2146
2147	phy_link_state = gbe_phy_link_status(slave);
2148	link_state = phy_link_state & sw_link_state;
2149
2150	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2151		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2152					      link_state);
2153}
2154
2155static void xgbe_adjust_link(struct net_device *ndev)
2156{
2157	struct netcp_intf *netcp = netdev_priv(ndev);
2158	struct gbe_intf *gbe_intf;
2159
2160	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2161	if (!gbe_intf)
2162		return;
2163
2164	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2165				      ndev);
2166}
2167
2168static void gbe_adjust_link(struct net_device *ndev)
2169{
2170	struct netcp_intf *netcp = netdev_priv(ndev);
2171	struct gbe_intf *gbe_intf;
2172
2173	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2174	if (!gbe_intf)
2175		return;
2176
2177	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2178				      ndev);
2179}
2180
2181static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2182{
2183	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2184	struct gbe_slave *slave;
2185
2186	for_each_sec_slave(slave, gbe_dev)
2187		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2188}
2189
2190/* Reset EMAC
2191 * Soft reset is set and polled until clear, or until a timeout occurs
2192 */
2193static int gbe_port_reset(struct gbe_slave *slave)
2194{
2195	u32 i, v;
2196
2197	/* Set the soft reset bit */
2198	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2199
2200	/* Wait for the bit to clear */
2201	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2202		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2203		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2204			return 0;
2205	}
2206
2207	/* Timeout on the reset */
2208	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2209}
2210
2211/* Configure EMAC */
2212static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2213			    int max_rx_len)
2214{
2215	void __iomem *rx_maxlen_reg;
2216	u32 xgmii_mode;
2217
2218	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2219		max_rx_len = NETCP_MAX_FRAME_SIZE;
2220
2221	/* Enable correct MII mode at SS level */
2222	if (IS_SS_ID_XGBE(gbe_dev) &&
2223	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2224		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2225		xgmii_mode |= (1 << slave->slave_num);
2226		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2227	}
2228
2229	if (IS_SS_ID_MU(gbe_dev))
2230		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2231	else
2232		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2233
2234	writel(max_rx_len, rx_maxlen_reg);
2235	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2236}
2237
2238static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2239			      struct gbe_slave *slave, bool set)
2240{
2241	if (SLAVE_LINK_IS_XGMII(slave))
2242		return;
2243
2244	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2245			    slave->slave_num, set);
2246}
2247
2248static void gbe_slave_stop(struct gbe_intf *intf)
2249{
2250	struct gbe_priv *gbe_dev = intf->gbe_dev;
2251	struct gbe_slave *slave = intf->slave;
2252
2253	if (!IS_SS_ID_2U(gbe_dev))
2254		gbe_sgmii_rtreset(gbe_dev, slave, true);
2255	gbe_port_reset(slave);
2256	/* Disable forwarding */
2257	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2258			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2259	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2260			   1 << slave->port_num, 0, 0);
2261
2262	if (!slave->phy)
2263		return;
2264
2265	phy_stop(slave->phy);
2266	phy_disconnect(slave->phy);
2267	slave->phy = NULL;
2268}
2269
2270static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2271{
2272	if (SLAVE_LINK_IS_XGMII(slave))
2273		return;
2274
2275	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2276	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2277			   slave->link_interface);
2278}
2279
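/* Bring up a slave port: configure SGMII (not on 2U), reset and configure the
 * port, program the MAC address, enable ALE forwarding and, for MAC-PHY link
 * interfaces, connect and start the PHY.
 */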
2280static int gbe_slave_open(struct gbe_intf *gbe_intf)
2281{
2282	struct gbe_priv *priv = gbe_intf->gbe_dev;
2283	struct gbe_slave *slave = gbe_intf->slave;
2284	phy_interface_t phy_mode;
2285	bool has_phy = false;
2286	int err;
2287
2288	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2289
2290	if (!IS_SS_ID_2U(priv))
2291		gbe_sgmii_config(priv, slave);
2292	gbe_port_reset(slave);
2293	if (!IS_SS_ID_2U(priv))
2294		gbe_sgmii_rtreset(priv, slave, false);
2295	gbe_port_config(priv, slave, priv->rx_packet_max);
2296	gbe_set_slave_mac(slave, gbe_intf);
2297	/* For NU & 2U switch, map the vlan priorities to zero
2298	 * as we only configure to use priority 0
2299	 */
2300	if (IS_SS_ID_MU(priv))
2301		writel(HOST_TX_PRI_MAP_DEFAULT,
2302		       GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2303
2304	/* enable forwarding */
2305	cpsw_ale_control_set(priv->ale, slave->port_num,
2306			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2307	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2308			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2309
2310	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2311		has_phy = true;
2312		phy_mode = PHY_INTERFACE_MODE_SGMII;
2313		slave->phy_port_t = PORT_MII;
2314	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2315		has_phy = true;
2316		err = of_get_phy_mode(slave->node, &phy_mode);
2317		/* if phy-mode is not present, default to
2318		 * PHY_INTERFACE_MODE_RGMII
2319		 */
2320		if (err)
2321			phy_mode = PHY_INTERFACE_MODE_RGMII;
2322
2323		if (!phy_interface_mode_is_rgmii(phy_mode)) {
2324			dev_err(priv->dev,
2325				"Unsupported phy mode %d\n", phy_mode);
2326			return -EINVAL;
2327		}
2328		slave->phy_port_t = PORT_MII;
2329	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2330		has_phy = true;
2331		phy_mode = PHY_INTERFACE_MODE_NA;
2332		slave->phy_port_t = PORT_FIBRE;
2333	}
2334
2335	if (has_phy) {
2336		if (IS_SS_ID_XGBE(priv))
2337			hndlr = xgbe_adjust_link;
2338
2339		slave->phy = of_phy_connect(gbe_intf->ndev,
2340					    slave->phy_node,
2341					    hndlr, 0,
2342					    phy_mode);
2343		if (!slave->phy) {
2344			dev_err(priv->dev, "phy not found on slave %d\n",
2345				slave->slave_num);
2346			return -ENODEV;
2347		}
2348		dev_dbg(priv->dev, "phy found: id is %s\n",
2349			phydev_name(slave->phy));
2350		phy_start(slave->phy);
2351	}
2352	return 0;
2353}
2354
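/* Initialize the host (port 0) side of the switch and start the ALE.  Unless
 * the "enable-ale" DT property was given, the ALE is put into bypass mode.
 */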
2355static void gbe_init_host_port(struct gbe_priv *priv)
2356{
2357	int bypass_en = 1;
2358
2359	/* Host Tx Pri */
2360	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2361		writel(HOST_TX_PRI_MAP_DEFAULT,
2362		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2363
2364	/* Max length register */
2365	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2366						  rx_maxlen));
2367
2368	cpsw_ale_start(priv->ale);
2369
2370	if (priv->enable_ale)
2371		bypass_en = 0;
2372
2373	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2374
2375	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2376
2377	cpsw_ale_control_set(priv->ale, priv->host_port,
2378			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2379
2380	cpsw_ale_control_set(priv->ale, 0,
2381			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2382			     GBE_PORT_MASK(priv->ale_ports));
2383
2384	cpsw_ale_control_set(priv->ale, 0,
2385			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2386			     GBE_PORT_MASK(priv->ale_ports - 1));
2387
2388	cpsw_ale_control_set(priv->ale, 0,
2389			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2390			     GBE_PORT_MASK(priv->ale_ports));
2391
2392	cpsw_ale_control_set(priv->ale, 0,
2393			     ALE_PORT_UNTAGGED_EGRESS,
2394			     GBE_PORT_MASK(priv->ale_ports));
2395}
2396
2397static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2398{
2399	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2400	u16 vlan_id;
2401
2402	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2403			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2404			   ALE_MCAST_FWD_2);
2405	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2406		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2407				   GBE_PORT_MASK(gbe_dev->ale_ports),
2408				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2409	}
2410}
2411
2412static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2413{
2414	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2415	u16 vlan_id;
2416
2417	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2418
2419	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2420		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2421				   ALE_VLAN, vlan_id);
2422}
2423
2424static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2425{
2426	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2427	u16 vlan_id;
2428
2429	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2430
2431	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2432		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2433	}
2434}
2435
2436static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2437{
2438	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2439	u16 vlan_id;
2440
2441	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2442
2443	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2444		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2445				   ALE_VLAN, vlan_id);
2446	}
2447}
2448
2449static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2450{
2451	struct gbe_intf *gbe_intf = intf_priv;
2452	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2453
2454	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2455		naddr->addr, naddr->type);
2456
2457	switch (naddr->type) {
2458	case ADDR_MCAST:
2459	case ADDR_BCAST:
2460		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2461		break;
2462	case ADDR_UCAST:
2463	case ADDR_DEV:
2464		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2465		break;
2466	case ADDR_ANY:
2467		/* nothing to do for promiscuous */
2468	default:
2469		break;
2470	}
2471
2472	return 0;
2473}
2474
2475static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2476{
2477	struct gbe_intf *gbe_intf = intf_priv;
2478	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2479
2480	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2481		naddr->addr, naddr->type);
2482
2483	switch (naddr->type) {
2484	case ADDR_MCAST:
2485	case ADDR_BCAST:
2486		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2487		break;
2488	case ADDR_UCAST:
2489	case ADDR_DEV:
2490		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2491		break;
2492	case ADDR_ANY:
2493		/* nothing to do for promiscuous */
2494	default:
2495		break;
2496	}
2497
2498	return 0;
2499}
2500
2501static int gbe_add_vid(void *intf_priv, int vid)
2502{
2503	struct gbe_intf *gbe_intf = intf_priv;
2504	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2505
2506	set_bit(vid, gbe_intf->active_vlans);
2507
2508	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2509			  GBE_PORT_MASK(gbe_dev->ale_ports),
2510			  GBE_MASK_NO_PORTS,
2511			  GBE_PORT_MASK(gbe_dev->ale_ports),
2512			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2513
2514	return 0;
2515}
2516
2517static int gbe_del_vid(void *intf_priv, int vid)
2518{
2519	struct gbe_intf *gbe_intf = intf_priv;
2520	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2521
2522	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2523	clear_bit(vid, gbe_intf->active_vlans);
2524	return 0;
2525}
2526
2527#if IS_ENABLED(CONFIG_TI_CPTS)
2528
2529static void gbe_txtstamp(void *context, struct sk_buff *skb)
2530{
2531	struct gbe_intf *gbe_intf = context;
2532	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2533
2534	cpts_tx_timestamp(gbe_dev->cpts, skb);
2535}
2536
2537static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2538			      const struct netcp_packet *p_info)
2539{
2540	struct sk_buff *skb = p_info->skb;
2541
2542	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2543}
2544
2545static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2546				 struct netcp_packet *p_info)
2547{
2548	struct phy_device *phydev = p_info->skb->dev->phydev;
2549	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2550
2551	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2552	    !gbe_dev->tx_ts_enabled)
2553		return 0;
2554
2555	/* If phy has the txtstamp api, assume it will do it.
2556	 * We mark it here because skb_tx_timestamp() is called
2557	 * after all the txhooks are called.
2558	 */
2559	if (phy_has_txtstamp(phydev)) {
2560		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2561		return 0;
2562	}
2563
2564	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2565		p_info->txtstamp = gbe_txtstamp;
2566		p_info->ts_context = (void *)gbe_intf;
2567		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2568	}
2569
2570	return 0;
2571}
2572
2573static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2574{
2575	struct phy_device *phydev = p_info->skb->dev->phydev;
2576	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2577
2578	if (p_info->rxtstamp_complete)
2579		return 0;
2580
2581	if (phy_has_rxtstamp(phydev)) {
2582		p_info->rxtstamp_complete = true;
2583		return 0;
2584	}
2585
2586	if (gbe_dev->rx_ts_enabled)
2587		cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2588
2589	p_info->rxtstamp_complete = true;
2590
2591	return 0;
2592}
2593
2594static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2595{
2596	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2597	struct cpts *cpts = gbe_dev->cpts;
2598	struct hwtstamp_config cfg;
2599
2600	if (!cpts)
2601		return -EOPNOTSUPP;
2602
2603	cfg.flags = 0;
2604	cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2605	cfg.rx_filter = gbe_dev->rx_ts_enabled;
2606
2607	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2608}
2609
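/* Program the per-slave time sync registers to match the current tx/rx
 * timestamping state: enable the annex/VLAN event matching bits when
 * timestamping is on, or clear ts_ctl entirely when both directions are off.
 */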
2610static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2611{
2612	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2613	struct gbe_slave *slave = gbe_intf->slave;
2614	u32 ts_en, seq_id, ctl;
2615
2616	if (!gbe_dev->rx_ts_enabled &&
2617	    !gbe_dev->tx_ts_enabled) {
2618		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2619		return;
2620	}
2621
2622	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2623	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2624	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2625		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2626		(slave->ts_ctl.uni ?  TS_UNI_EN :
2627			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2628
2629	if (gbe_dev->tx_ts_enabled)
2630		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2631
2632	if (gbe_dev->rx_ts_enabled)
2633		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2634
2635	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2636	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2637	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2638}
2639
2640static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2641{
2642	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2643	struct cpts *cpts = gbe_dev->cpts;
2644	struct hwtstamp_config cfg;
2645
2646	if (!cpts)
2647		return -EOPNOTSUPP;
2648
2649	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2650		return -EFAULT;
2651
2652	switch (cfg.tx_type) {
2653	case HWTSTAMP_TX_OFF:
2654		gbe_dev->tx_ts_enabled = 0;
2655		break;
2656	case HWTSTAMP_TX_ON:
2657		gbe_dev->tx_ts_enabled = 1;
2658		break;
2659	default:
2660		return -ERANGE;
2661	}
2662
2663	switch (cfg.rx_filter) {
2664	case HWTSTAMP_FILTER_NONE:
2665		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2666		break;
2666		break;
2667	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2668	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2669	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2670		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2671		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2672		break;
2673	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2674	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2675	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2676	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2677	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2678	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2679	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2680	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2681	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2682		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2683		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2684		break;
2685	default:
2686		return -ERANGE;
2687	}
2688
2689	gbe_hwtstamp(gbe_intf);
2690
2691	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2692}
2693
2694static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2695{
2696	if (!gbe_dev->cpts)
2697		return;
2698
2699	if (gbe_dev->cpts_registered > 0)
2700		goto done;
2701
2702	if (cpts_register(gbe_dev->cpts)) {
2703		dev_err(gbe_dev->dev, "error registering cpts device\n");
2704		return;
2705	}
2706
2707done:
2708	++gbe_dev->cpts_registered;
2709}
2710
2711static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2712{
2713	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2714		return;
2715
2716	if (--gbe_dev->cpts_registered)
2717		return;
2718
2719	cpts_unregister(gbe_dev->cpts);
2720}
2721#else
2722static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2723					struct netcp_packet *p_info)
2724{
2725	return 0;
2726}
2727
2728static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2729			       struct netcp_packet *p_info)
2730{
2731	return 0;
2732}
2733
2734static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2735			       struct ifreq *ifr, int cmd)
2736{
2737	return -EOPNOTSUPP;
2738}
2739
2740static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2741{
2742}
2743
2744static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2745{
2746}
2747
2748static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2749{
2750	return -EOPNOTSUPP;
2751}
2752
2753static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2754{
2755	return -EOPNOTSUPP;
2756}
2757#endif /* CONFIG_TI_CPTS */
2758
2759static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2760{
2761	struct gbe_intf *gbe_intf = intf_priv;
2762	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2763	struct cpsw_ale *ale = gbe_dev->ale;
2764	unsigned long timeout;
2765	int i, ret = -ETIMEDOUT;
2766
2767	/* Disable(1)/Enable(0) learning for all ports (host is port 0 and
2768	 * slaves are ports 1 and up)
2769	 */
2770	for (i = 0; i <= gbe_dev->num_slaves; i++) {
2771		cpsw_ale_control_set(ale, i,
2772				     ALE_PORT_NOLEARN, !!promisc);
2773		cpsw_ale_control_set(ale, i,
2774				     ALE_PORT_NO_SA_UPDATE, !!promisc);
2775	}
2776
2777	if (!promisc) {
2778		/* Don't Flood All Unicast Packets to Host port */
2779		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2780		dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2781		return 0;
2782	}
2783
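	/* Start an ALE age-out cycle to drop stale entries and poll
	 * ALE_AGEOUT for up to a second until it reads back set.
	 */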
2784	timeout = jiffies + HZ;
2785
2786	/* Clear All Untouched entries */
2787	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2788	do {
2789		cpu_relax();
2790		if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2791			ret = 0;
2792			break;
2793		}
2794
2795	} while (time_after(timeout, jiffies));
2796
2797	/* Make sure it is not a false timeout */
2798	if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2799		return ret;
2800
2801	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2802
2803	/* Clear all mcast from ALE */
2804	cpsw_ale_flush_multicast(ale,
2805				 GBE_PORT_MASK(gbe_dev->ale_ports),
2806				 -1);
2807
2808	/* Flood All Unicast Packets to Host port */
2809	cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2810	dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2811	return ret;
2812}
2813
2814static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2815{
2816	struct gbe_intf *gbe_intf = intf_priv;
2817	struct phy_device *phy = gbe_intf->slave->phy;
2818
2819	if (!phy_has_hwtstamp(phy)) {
2820		switch (cmd) {
2821		case SIOCGHWTSTAMP:
2822			return gbe_hwtstamp_get(gbe_intf, req);
2823		case SIOCSHWTSTAMP:
2824			return gbe_hwtstamp_set(gbe_intf, req);
2825		}
2826	}
2827
2828	if (phy)
2829		return phy_mii_ioctl(phy, req, cmd);
2830
2831	return -EOPNOTSUPP;
2832}
2833
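/* Periodic housekeeping: refresh the link state of all interfaces and
 * secondary slaves, and fold the hardware counters into the 64-bit
 * accumulators so that 32-bit counter wraps are not missed between reads.
 */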
2834static void netcp_ethss_timer(struct timer_list *t)
2835{
2836	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2837	struct gbe_intf *gbe_intf;
2838	struct gbe_slave *slave;
2839
2840	/* Check & update SGMII link state of interfaces */
2841	for_each_intf(gbe_intf, gbe_dev) {
2842		if (!gbe_intf->slave->open)
2843			continue;
2844		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2845					      gbe_intf->ndev);
2846	}
2847
2848	/* Check & update SGMII link state of secondary ports */
2849	for_each_sec_slave(slave, gbe_dev) {
2850		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2851	}
2852
2853	/* The timer runs in BH context, so no need to disable BHs here */
2854	spin_lock(&gbe_dev->hw_stats_lock);
2855
2856	if (IS_SS_ID_VER_14(gbe_dev))
2857		gbe_update_stats_ver14(gbe_dev, NULL);
2858	else
2859		gbe_update_stats(gbe_dev, NULL);
2860
2861	spin_unlock(&gbe_dev->hw_stats_lock);
2862
2863	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2864	add_timer(&gbe_dev->timer);
2865}
2866
2867static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2868{
2869	struct gbe_intf *gbe_intf = data;
2870
2871	p_info->tx_pipe = &gbe_intf->tx_pipe;
2872
2873	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2874}
2875
2876static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2877{
2878	struct gbe_intf *gbe_intf = data;
2879
2880	return gbe_rxtstamp(gbe_intf, p_info);
2881}
2882
2883static int gbe_open(void *intf_priv, struct net_device *ndev)
2884{
2885	struct gbe_intf *gbe_intf = intf_priv;
2886	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2887	struct netcp_intf *netcp = netdev_priv(ndev);
2888	struct gbe_slave *slave = gbe_intf->slave;
2889	int port_num = slave->port_num;
2890	u32 reg, val;
2891	int ret;
2892
2893	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2894	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2895		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2896		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2897
2898	/* For 10G and NetCP 1.5 (NU), direct packets to the port via tag info */
2899	if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2900		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2901
2902	if (gbe_dev->enable_ale)
2903		gbe_intf->tx_pipe.switch_to_port = 0;
2904	else
2905		gbe_intf->tx_pipe.switch_to_port = port_num;
2906
2907	dev_dbg(gbe_dev->dev,
2908		"opened TX channel %s: %p, switch-to port %d, flags %d\n",
2909		gbe_intf->tx_pipe.dma_chan_name,
2910		gbe_intf->tx_pipe.dma_channel,
2911		gbe_intf->tx_pipe.switch_to_port,
2912		gbe_intf->tx_pipe.flags);
2913
2914	gbe_slave_stop(gbe_intf);
2915
2916	/* disable priority elevation and enable statistics on all ports */
2917	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2918
2919	/* Control register */
2920	val = GBE_CTL_P0_ENABLE;
2921	if (IS_SS_ID_MU(gbe_dev)) {
2922		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2923		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2924	}
2925	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2926
2927	/* All statistics enabled and STAT AB visible by default */
2928	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2929						    stat_port_en));
2930
2931	ret = gbe_slave_open(gbe_intf);
2932	if (ret)
2933		goto fail;
2934
2935	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2936	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2937
2938	slave->open = true;
2939	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2940
2941	gbe_register_cpts(gbe_dev);
2942
2943	return 0;
2944
2945fail:
2946	gbe_slave_stop(gbe_intf);
2947	return ret;
2948}
2949
2950static int gbe_close(void *intf_priv, struct net_device *ndev)
2951{
2952	struct gbe_intf *gbe_intf = intf_priv;
2953	struct netcp_intf *netcp = netdev_priv(ndev);
2954	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2955
2956	gbe_unregister_cpts(gbe_dev);
2957
2958	gbe_slave_stop(gbe_intf);
2959
2960	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2961	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2962
2963	gbe_intf->slave->open = false;
2964	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2965	return 0;
2966}
2967
2968#if IS_ENABLED(CONFIG_TI_CPTS)
2969static void init_slave_ts_ctl(struct gbe_slave *slave)
2970{
2971	slave->ts_ctl.uni = 1;
2972	slave->ts_ctl.dst_port_map =
2973		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2974	slave->ts_ctl.maddr_map =
2975		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2976}
2977
2978#else
2979static void init_slave_ts_ctl(struct gbe_slave *slave)
2980{
2981}
2982#endif /* CONFIG_TI_CPTS */
2983
2984static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2985		      struct device_node *node)
2986{
2987	int port_reg_num;
2988	u32 port_reg_ofs, emac_reg_ofs;
2989	u32 port_reg_blk_sz, emac_reg_blk_sz;
2990
2991	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2992		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2993		return -EINVAL;
2994	}
2995
2996	if (of_property_read_u32(node, "link-interface",
2997				 &slave->link_interface)) {
2998		dev_warn(gbe_dev->dev,
2999			 "missing link-interface value defaulting to 1G mac-phy link\n");
3000		slave->link_interface = SGMII_LINK_MAC_PHY;
3001	}
3002
3003	slave->node = node;
3004	slave->open = false;
3005	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3006	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3007	    (slave->link_interface == XGMII_LINK_MAC_PHY))
3008		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3009	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3010
3011	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3012		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3013	else
3014		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3015
3016	/* EMAC regs are mapped contiguously, but port regs are not */
3017	port_reg_num = slave->slave_num;
3018	if (IS_SS_ID_VER_14(gbe_dev)) {
3019		if (slave->slave_num > 1) {
3020			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3021			port_reg_num -= 2;
3022		} else {
3023			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3024		}
3025		emac_reg_ofs = GBE13_EMAC_OFFSET;
3026		port_reg_blk_sz = 0x30;
3027		emac_reg_blk_sz = 0x40;
3028	} else if (IS_SS_ID_MU(gbe_dev)) {
3029		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3030		emac_reg_ofs = GBENU_EMAC_OFFSET;
3031		port_reg_blk_sz = 0x1000;
3032		emac_reg_blk_sz = 0x1000;
3033	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3034		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3035		emac_reg_ofs = XGBE10_EMAC_OFFSET;
3036		port_reg_blk_sz = 0x30;
3037		emac_reg_blk_sz = 0x40;
3038	} else {
3039		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3040			gbe_dev->ss_version);
3041		return -EINVAL;
3042	}
3043
3044	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3045				(port_reg_blk_sz * port_reg_num);
3046	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3047				(emac_reg_blk_sz * slave->slave_num);
3048
3049	if (IS_SS_ID_VER_14(gbe_dev)) {
3050		/* Initialize slave port register offsets */
3051		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3052		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3053		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3054		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3055		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3056		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3057		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3058		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3059		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3060
3061		/* Initialize EMAC register offsets */
3062		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3063		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3064		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3065
3066	} else if (IS_SS_ID_MU(gbe_dev)) {
3067		/* Initialize slave port register offsets */
3068		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3069		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3070		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3071		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3072		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3073		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3074		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3075		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3076		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3077		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3078		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3079
3080		/* Initialize EMAC register offsets */
3081		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3082		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3083
3084	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3085		/* Initialize slave port register offsets */
3086		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3087		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3088		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3089		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3090		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3091		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3092		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3093		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3094		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3095
3096		/* Initialize EMAC register offsets */
3097		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3098		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3099		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3100	}
3101
3102	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3103
3104	init_slave_ts_ctl(slave);
3105	return 0;
3106}
3107
3108static void init_secondary_ports(struct gbe_priv *gbe_dev,
3109				 struct device_node *node)
3110{
3111	struct device *dev = gbe_dev->dev;
3112	phy_interface_t phy_mode;
3113	struct gbe_priv **priv;
3114	struct device_node *port;
3115	struct gbe_slave *slave;
3116	bool mac_phy_link = false;
3117
3118	for_each_child_of_node(node, port) {
3119		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3120		if (!slave) {
3121			dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3122				port);
3123			continue;
3124		}
3125
3126		if (init_slave(gbe_dev, slave, port)) {
3127			dev_err(dev,
3128				"Failed to initialize secondary port(%pOFn), skipping...\n",
3129				port);
3130			devm_kfree(dev, slave);
3131			continue;
3132		}
3133
3134		if (!IS_SS_ID_2U(gbe_dev))
3135			gbe_sgmii_config(gbe_dev, slave);
3136		gbe_port_reset(slave);
3137		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3138		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3139		gbe_dev->num_slaves++;
3140		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3141		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3142			mac_phy_link = true;
3143
3144		slave->open = true;
3145		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3146			of_node_put(port);
3147			break;
3148		}
3149	}
3150
3151	/* of_phy_connect() is needed only for MAC-PHY interface */
3152	if (!mac_phy_link)
3153		return;
3154
3155	/* Allocate dummy netdev device for attaching to phy device */
3156	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3157					NET_NAME_UNKNOWN, ether_setup);
3158	if (!gbe_dev->dummy_ndev) {
3159		dev_err(dev,
3160			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3161		return;
3162	}
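	/* The dummy netdev's private area is sized for a single pointer
	 * (alloc_netdev(sizeof(gbe_dev), ...) above) and stores a pointer
	 * back to gbe_priv.
	 */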
3163	priv = netdev_priv(gbe_dev->dummy_ndev);
3164	*priv = gbe_dev;
3165
3166	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3167		phy_mode = PHY_INTERFACE_MODE_SGMII;
3168		slave->phy_port_t = PORT_MII;
3169	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3170		phy_mode = PHY_INTERFACE_MODE_RGMII;
3171		slave->phy_port_t = PORT_MII;
3172	} else {
3173		phy_mode = PHY_INTERFACE_MODE_NA;
3174		slave->phy_port_t = PORT_FIBRE;
3175	}
3176
3177	for_each_sec_slave(slave, gbe_dev) {
3178		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3179		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3180		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3181			continue;
3182		slave->phy =
3183			of_phy_connect(gbe_dev->dummy_ndev,
3184				       slave->phy_node,
3185				       gbe_adjust_link_sec_slaves,
3186				       0, phy_mode);
3187		if (!slave->phy) {
3188			dev_err(dev, "phy not found for slave %d\n",
3189				slave->slave_num);
3190		} else {
3191			dev_dbg(dev, "phy found: id is %s\n",
3192				phydev_name(slave->phy));
3193			phy_start(slave->phy);
3194		}
3195	}
3196}
3197
3198static void free_secondary_ports(struct gbe_priv *gbe_dev)
3199{
3200	struct gbe_slave *slave;
3201
3202	while (!list_empty(&gbe_dev->secondary_slaves)) {
3203		slave = first_sec_slave(gbe_dev);
3204
3205		if (slave->phy)
3206			phy_disconnect(slave->phy);
3207		list_del(&slave->slave_list);
3208	}
3209	if (gbe_dev->dummy_ndev)
3210		free_netdev(gbe_dev->dummy_ndev);
3211}
3212
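/* XGBE (10G) setup: map the subsystem, switch module and SerDes register
 * regions from the DT node, allocate the stats arrays and fill in the XGBE10
 * register offsets.
 */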
3213static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3214				 struct device_node *node)
3215{
3216	struct resource res;
3217	void __iomem *regs;
3218	int ret, i;
3219
3220	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3221	if (ret) {
3222		dev_err(gbe_dev->dev,
3223			"Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3224			node, XGBE_SS_REG_INDEX);
3225		return ret;
3226	}
3227
3228	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3229	if (IS_ERR(regs)) {
3230		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3231		return PTR_ERR(regs);
3232	}
3233	gbe_dev->ss_regs = regs;
3234
3235	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3236	if (ret) {
3237		dev_err(gbe_dev->dev,
3238			"Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3239			node, XGBE_SM_REG_INDEX);
3240		return ret;
3241	}
3242
3243	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3244	if (IS_ERR(regs)) {
3245		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3246		return PTR_ERR(regs);
3247	}
3248	gbe_dev->switch_regs = regs;
3249
3250	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3251	if (ret) {
3252		dev_err(gbe_dev->dev,
3253			"Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3254			node, XGBE_SERDES_REG_INDEX);
3255		return ret;
3256	}
3257
3258	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3259	if (IS_ERR(regs)) {
3260		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3261		return PTR_ERR(regs);
3262	}
3263	gbe_dev->xgbe_serdes_regs = regs;
3264
3265	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3266	gbe_dev->et_stats = xgbe10_et_stats;
3267	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3268
3269	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3270					 gbe_dev->num_et_stats, sizeof(u64),
3271					 GFP_KERNEL);
3272	if (!gbe_dev->hw_stats) {
3273		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3274		return -ENOMEM;
3275	}
3276
3277	gbe_dev->hw_stats_prev =
3278		devm_kcalloc(gbe_dev->dev,
3279			     gbe_dev->num_et_stats, sizeof(u32),
3280			     GFP_KERNEL);
3281	if (!gbe_dev->hw_stats_prev) {
3282		dev_err(gbe_dev->dev,
3283			"hw_stats_prev memory allocation failed\n");
3284		return -ENOMEM;
3285	}
3286
3287	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3288	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3289					XGBE10_SGMII_MODULE_OFFSET;
3290	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3291
3292	for (i = 0; i < gbe_dev->max_num_ports; i++)
3293		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3294			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3295
3296	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3297	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3298	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3299	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3300	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3301
3302	/* Subsystem registers */
3303	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3304	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3305
3306	/* Switch module registers */
3307	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3308	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3309	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3310	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3311	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3312
3313	/* Host port registers */
3314	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3315	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3316	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3317	return 0;
3318}
3319
3320static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3321				    struct device_node *node)
3322{
3323	struct resource res;
3324	void __iomem *regs;
3325	int ret;
3326
3327	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3328	if (ret) {
3329		dev_err(gbe_dev->dev,
3330			"Can't translate of node(%pOFn) of gbe ss address at %d\n",
3331			node, GBE_SS_REG_INDEX);
3332		return ret;
3333	}
3334
3335	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3336	if (IS_ERR(regs)) {
3337		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3338		return PTR_ERR(regs);
3339	}
3340	gbe_dev->ss_regs = regs;
3341	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3342	return 0;
3343}
3344
3345static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3346				struct device_node *node)
3347{
3348	struct resource res;
3349	void __iomem *regs;
3350	int i, ret;
3351
3352	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3353	if (ret) {
3354		dev_err(gbe_dev->dev,
3355			"Can't translate of gbe node(%pOFn) address at index %d\n",
3356			node, GBE_SGMII34_REG_INDEX);
3357		return ret;
3358	}
3359
3360	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3361	if (IS_ERR(regs)) {
3362		dev_err(gbe_dev->dev,
3363			"Failed to map gbe sgmii port34 register base\n");
3364		return PTR_ERR(regs);
3365	}
3366	gbe_dev->sgmii_port34_regs = regs;
3367
3368	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3369	if (ret) {
3370		dev_err(gbe_dev->dev,
3371			"Can't translate of gbe node(%pOFn) address at index %d\n",
3372			node, GBE_SM_REG_INDEX);
3373		return ret;
3374	}
3375
3376	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3377	if (IS_ERR(regs)) {
3378		dev_err(gbe_dev->dev,
3379			"Failed to map gbe switch module register base\n");
3380		return PTR_ERR(regs);
3381	}
3382	gbe_dev->switch_regs = regs;
3383
3384	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3385	gbe_dev->et_stats = gbe13_et_stats;
3386	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3387
3388	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3389					 gbe_dev->num_et_stats, sizeof(u64),
3390					 GFP_KERNEL);
3391	if (!gbe_dev->hw_stats) {
3392		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3393		return -ENOMEM;
3394	}
3395
3396	gbe_dev->hw_stats_prev =
3397		devm_kcalloc(gbe_dev->dev,
3398			     gbe_dev->num_et_stats, sizeof(u32),
3399			     GFP_KERNEL);
3400	if (!gbe_dev->hw_stats_prev) {
3401		dev_err(gbe_dev->dev,
3402			"hw_stats_prev memory allocation failed\n");
3403		return -ENOMEM;
3404	}
3405
3406	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3407	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3408
3409	/* K2HK has only 2 hw stats modules visible at a time, so
3410	 * modules 0 & 2 point to one base and
3411	 * modules 1 & 3 point to the other base
3412	 */
3413	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3414		gbe_dev->hw_stats_regs[i] =
3415			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3416			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3417	}
3418
3419	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3420	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3421	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3422	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3423	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3424
3425	/* Subsystem registers */
3426	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3427
3428	/* Switch module registers */
3429	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3430	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3431	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3432	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3433	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3434	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3435
3436	/* Host port registers */
3437	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3438	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3439	return 0;
3440}
3441
3442static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3443				struct device_node *node)
3444{
3445	struct resource res;
3446	void __iomem *regs;
3447	int i, ret;
3448
3449	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3450	gbe_dev->et_stats = gbenu_et_stats;
3451
3452	if (IS_SS_ID_MU(gbe_dev))
3453		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3454			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3455	else
3456		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3457					GBENU_ET_STATS_PORT_SIZE;
3458
3459	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3460					 gbe_dev->num_et_stats, sizeof(u64),
3461					 GFP_KERNEL);
3462	if (!gbe_dev->hw_stats) {
3463		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3464		return -ENOMEM;
3465	}
3466
3467	gbe_dev->hw_stats_prev =
3468		devm_kcalloc(gbe_dev->dev,
3469			     gbe_dev->num_et_stats, sizeof(u32),
3470			     GFP_KERNEL);
3471	if (!gbe_dev->hw_stats_prev) {
3472		dev_err(gbe_dev->dev,
3473			"hw_stats_prev memory allocation failed\n");
3474		return -ENOMEM;
3475	}
3476
3477	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3478	if (ret) {
3479		dev_err(gbe_dev->dev,
3480			"Can't translate of gbenu node(%pOFn) addr at index %d\n",
3481			node, GBENU_SM_REG_INDEX);
3482		return ret;
3483	}
3484
3485	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3486	if (IS_ERR(regs)) {
3487		dev_err(gbe_dev->dev,
3488			"Failed to map gbenu switch module register base\n");
3489		return PTR_ERR(regs);
3490	}
3491	gbe_dev->switch_regs = regs;
3492
3493	if (!IS_SS_ID_2U(gbe_dev))
3494		gbe_dev->sgmii_port_regs =
3495		       gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3496
3497	/* Although the sgmii modules are memory mapped as one contiguous
3498	 * region on GBENU devices, setting sgmii_port34_regs keeps the
3499	 * code that accesses the sgmii API consistent with other devices
3500	 */
3501	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3502				     (2 * GBENU_SGMII_MODULE_SIZE);
3503
3504	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3505
3506	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3507		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3508			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3509
3510	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3511	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3512	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3513	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3514	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3515
3516	/* Subsystem registers */
3517	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3518	/* ok to set for MU, but used by 2U only */
3519	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3520
3521	/* Switch module registers */
3522	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3523	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3524	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3525	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3526
3527	/* Host port registers */
3528	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3529	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3530
3531	/* For NU only; 2U does not need tx_pri_map.
3532	 * The NU CPPI port 0 TX packet streaming interface has (n-1)*8 egress
3533	 * threads, while 2U has only one such thread.
3534	 */
3535	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3536	return 0;
3537}
3538
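/* NETCP module probe for one GBE/XGBE subsystem instance: pick the slave
 * count from the device-tree compatible string, map the version-specific
 * register spaces, open the TX pipe, count the "interfaces" children,
 * create the ALE and CPTS instances, reset the hardware statistics and
 * start the periodic state-machine timer.
 */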
3539static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3540		     struct device_node *node, void **inst_priv)
3541{
3542	struct device_node *interfaces, *interface, *cpts_node;
3543	struct device_node *secondary_ports;
3544	struct cpsw_ale_params ale_params;
3545	struct gbe_priv *gbe_dev;
3546	u32 slave_num;
3547	int i, ret = 0;
3548
3549	if (!node) {
3550		dev_err(dev, "device tree info unavailable\n");
3551		return -ENODEV;
3552	}
3553
3554	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3555	if (!gbe_dev)
3556		return -ENOMEM;
3557
3558	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3559	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3560		gbe_dev->max_num_slaves = 4;
3561	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3562		gbe_dev->max_num_slaves = 8;
3563	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3564		gbe_dev->max_num_slaves = 1;
3565		gbe_module.set_rx_mode = gbe_set_rx_mode;
3566	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3567		gbe_dev->max_num_slaves = 2;
3568	} else {
3569		dev_err(dev, "device tree node for unknown device\n");
3570		return -EINVAL;
3571	}
3572	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3573
3574	gbe_dev->dev = dev;
3575	gbe_dev->netcp_device = netcp_device;
3576	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3577
3578	/* init the hw stats lock */
3579	spin_lock_init(&gbe_dev->hw_stats_lock);
3580
3581	gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
3582	if (gbe_dev->enable_ale)
3583		dev_info(dev, "ALE enabled\n");
3584	else
3585		dev_dbg(dev, "ALE bypass enabled\n");
3586
3587	ret = of_property_read_u32(node, "tx-queue",
3588				   &gbe_dev->tx_queue_id);
3589	if (ret < 0) {
3590		dev_err(dev, "missing tx-queue parameter\n");
3591		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3592	}
3593
3594	ret = of_property_read_string(node, "tx-channel",
3595				      &gbe_dev->dma_chan_name);
3596	if (ret < 0) {
3597		dev_err(dev, "missing \"tx-channel\" parameter\n");
3598		return -EINVAL;
3599	}
3600
3601	if (of_node_name_eq(node, "gbe")) {
3602		ret = get_gbe_resource_version(gbe_dev, node);
3603		if (ret)
3604			return ret;
3605
3606		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3607
3608		if (IS_SS_ID_VER_14(gbe_dev))
3609			ret = set_gbe_ethss14_priv(gbe_dev, node);
3610		else if (IS_SS_ID_MU(gbe_dev))
3611			ret = set_gbenu_ethss_priv(gbe_dev, node);
3612		else
3613			ret = -ENODEV;
3614
3615	} else if (of_node_name_eq(node, "xgbe")) {
3616		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3617		if (ret)
3618			return ret;
3619		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3620					     gbe_dev->ss_regs);
3621	} else {
3622		dev_err(dev, "unknown GBE node(%pOFn)\n", node);
3623		ret = -ENODEV;
3624	}
3625
3626	if (ret)
3627		return ret;
3628
3629	interfaces = of_get_child_by_name(node, "interfaces");
3630	if (!interfaces)
3631		dev_err(dev, "could not find interfaces\n");
3632
3633	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3634				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3635	if (ret) {
3636		of_node_put(interfaces);
3637		return ret;
3638	}
3639
3640	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3641	if (ret) {
3642		of_node_put(interfaces);
3643		return ret;
3644	}
3645
3646	/* Create network interfaces */
3647	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3648	for_each_child_of_node(interfaces, interface) {
3649		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3650		if (ret) {
3651			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
3652				interface);
3653			continue;
3654		}
3655		gbe_dev->num_slaves++;
3656		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3657			of_node_put(interface);
3658			break;
3659		}
3660	}
3661	of_node_put(interfaces);
3662
3663	if (!gbe_dev->num_slaves)
3664		dev_warn(dev, "No network interface configured\n");
3665
3666	/* Initialize Secondary slave ports */
3667	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3668	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3669	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
3670		init_secondary_ports(gbe_dev, secondary_ports);
3671	of_node_put(secondary_ports);
3672
3673	if (!gbe_dev->num_slaves) {
3674		dev_err(dev,
3675			"No network interface or secondary ports configured\n");
3676		ret = -ENODEV;
3677		goto free_sec_ports;
3678	}
3679
3680	memset(&ale_params, 0, sizeof(ale_params));
3681	ale_params.dev		= gbe_dev->dev;
3682	ale_params.ale_regs	= gbe_dev->ale_reg;
3683	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3684	ale_params.ale_ports	= gbe_dev->ale_ports;
3685	ale_params.dev_id	= "cpsw";
3686	if (IS_SS_ID_NU(gbe_dev))
3687		ale_params.dev_id = "66ak2el";
3688	else if (IS_SS_ID_2U(gbe_dev))
3689		ale_params.dev_id = "66ak2g";
3690	else if (IS_SS_ID_XGBE(gbe_dev))
3691		ale_params.dev_id = "66ak2h-xgbe";
3692
3693	gbe_dev->ale = cpsw_ale_create(&ale_params);
3694	if (IS_ERR(gbe_dev->ale)) {
3695		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3696		ret = PTR_ERR(gbe_dev->ale);
3697		goto free_sec_ports;
3698	} else {
3699		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3700	}
3701
3702	cpts_node = of_get_child_by_name(node, "cpts");
3703	if (!cpts_node)
3704		cpts_node = of_node_get(node);
3705
3706	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
3707				    cpts_node, 0);
3708	of_node_put(cpts_node);
3709	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3710		ret = PTR_ERR(gbe_dev->cpts);
3711		goto free_sec_ports;
3712	}
3713
3714	/* initialize host port */
3715	gbe_init_host_port(gbe_dev);
3716
3717	spin_lock_bh(&gbe_dev->hw_stats_lock);
3718	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3719		if (IS_SS_ID_VER_14(gbe_dev))
3720			gbe_reset_mod_stats_ver14(gbe_dev, i);
3721		else
3722			gbe_reset_mod_stats(gbe_dev, i);
3723	}
3724	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3725
3726	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3727	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3728	add_timer(&gbe_dev->timer);
3729	*inst_priv = gbe_dev;
3730	return 0;
3731
3732free_sec_ports:
3733	free_secondary_ports(gbe_dev);
3734	return ret;
3735}
3736
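/* Attach a network interface to this subsystem instance: allocate the
 * per-interface and slave state, initialize the slave from its interface
 * device-tree node, install the Keystone ethtool ops and queue the
 * interface on the instance list.
 */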
3737static int gbe_attach(void *inst_priv, struct net_device *ndev,
3738		      struct device_node *node, void **intf_priv)
3739{
3740	struct gbe_priv *gbe_dev = inst_priv;
3741	struct gbe_intf *gbe_intf;
3742	int ret;
3743
3744	if (!node) {
3745		dev_err(gbe_dev->dev, "interface node not available\n");
3746		return -ENODEV;
3747	}
3748
3749	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3750	if (!gbe_intf)
3751		return -ENOMEM;
3752
3753	gbe_intf->ndev = ndev;
3754	gbe_intf->dev = gbe_dev->dev;
3755	gbe_intf->gbe_dev = gbe_dev;
3756
3757	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3758					sizeof(*gbe_intf->slave),
3759					GFP_KERNEL);
3760	if (!gbe_intf->slave) {
3761		ret = -ENOMEM;
3762		goto fail;
3763	}
3764
3765	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3766		ret = -ENODEV;
3767		goto fail;
3768	}
3769
3770	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3771	ndev->ethtool_ops = &keystone_ethtool_ops;
3772	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3773	*intf_priv = gbe_intf;
3774	return 0;
3775
3776fail:
3777	if (gbe_intf->slave)
3778		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3779	if (gbe_intf)
3780		devm_kfree(gbe_dev->dev, gbe_intf);
3781	return ret;
3782}
3783
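/* Undo gbe_attach() for one interface: drop its ethtool ops, unlink it
 * from the instance list and free its per-interface state.
 */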
3784static int gbe_release(void *intf_priv)
3785{
3786	struct gbe_intf *gbe_intf = intf_priv;
3787
3788	gbe_intf->ndev->ethtool_ops = NULL;
3789	list_del(&gbe_intf->gbe_intf_list);
3790	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3791	devm_kfree(gbe_intf->dev, gbe_intf);
3792	return 0;
3793}
3794
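/* Tear down a subsystem instance: stop the timer, release CPTS, stop the
 * ALE, close the TX pipe and free any secondary slave ports.
 */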
3795static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3796{
3797	struct gbe_priv *gbe_dev = inst_priv;
3798
3799	del_timer_sync(&gbe_dev->timer);
3800	cpts_release(gbe_dev->cpts);
3801	cpsw_ale_stop(gbe_dev->ale);
3802	netcp_txpipe_close(&gbe_dev->tx_pipe);
3803	free_secondary_ports(gbe_dev);
3804
3805	if (!list_empty(&gbe_dev->gbe_intf_head))
3806		dev_alert(gbe_dev->dev,
3807			  "unreleased ethss interfaces present\n");
3808
3809	return 0;
3810}
3811
3812static struct netcp_module gbe_module = {
3813	.name		= GBE_MODULE_NAME,
3814	.owner		= THIS_MODULE,
3815	.primary	= true,
3816	.probe		= gbe_probe,
3817	.open		= gbe_open,
3818	.close		= gbe_close,
3819	.remove		= gbe_remove,
3820	.attach		= gbe_attach,
3821	.release	= gbe_release,
3822	.add_addr	= gbe_add_addr,
3823	.del_addr	= gbe_del_addr,
3824	.add_vid	= gbe_add_vid,
3825	.del_vid	= gbe_del_vid,
3826	.ioctl		= gbe_ioctl,
3827};
3828
3829static struct netcp_module xgbe_module = {
3830	.name		= XGBE_MODULE_NAME,
3831	.owner		= THIS_MODULE,
3832	.primary	= true,
3833	.probe		= gbe_probe,
3834	.open		= gbe_open,
3835	.close		= gbe_close,
3836	.remove		= gbe_remove,
3837	.attach		= gbe_attach,
3838	.release	= gbe_release,
3839	.add_addr	= gbe_add_addr,
3840	.del_addr	= gbe_del_addr,
3841	.add_vid	= gbe_add_vid,
3842	.del_vid	= gbe_del_vid,
3843	.ioctl		= gbe_ioctl,
3844};
3845
3846static int __init keystone_gbe_init(void)
3847{
3848	int ret;
3849
3850	ret = netcp_register_module(&gbe_module);
3851	if (ret)
3852		return ret;
3853
3854	ret = netcp_register_module(&xgbe_module);
3855	if (ret)
3856		return ret;
3857
3858	return 0;
3859}
3860module_init(keystone_gbe_init);
3861
3862static void __exit keystone_gbe_exit(void)
3863{
3864	netcp_unregister_module(&gbe_module);
3865	netcp_unregister_module(&xgbe_module);
3866}
3867module_exit(keystone_gbe_exit);
3868
3869MODULE_LICENSE("GPL v2");
3870MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3871	MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
v4.10.11
 
   1/*
   2 * Keystone GBE and XGBE subsystem code
   3 *
   4 * Copyright (C) 2014 Texas Instruments Incorporated
   5 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   6 *		Sandeep Paulraj <s-paulraj@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *		Wingman Kwok <w-kwok2@ti.com>
  10 *
  11 * This program is free software; you can redistribute it and/or
  12 * modify it under the terms of the GNU General Public License as
  13 * published by the Free Software Foundation version 2.
  14 *
  15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  16 * kind, whether express or implied; without even the implied warranty
  17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 */
  20
  21#include <linux/io.h>
  22#include <linux/module.h>
  23#include <linux/of_mdio.h>
 
  24#include <linux/of_address.h>
  25#include <linux/if_vlan.h>
  26#include <linux/ptp_classify.h>
  27#include <linux/net_tstamp.h>
  28#include <linux/ethtool.h>
  29
 
  30#include "cpsw_ale.h"
  31#include "netcp.h"
  32#include "cpts.h"
  33
  34#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
  35#define NETCP_DRIVER_VERSION		"v1.0"
  36
  37#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
  38#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
  39#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
  40#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
  41
  42/* 1G Ethernet SS defines */
  43#define GBE_MODULE_NAME			"netcp-gbe"
  44#define GBE_SS_VERSION_14		0x4ed21104
  45
  46#define GBE_SS_REG_INDEX		0
  47#define GBE_SGMII34_REG_INDEX		1
  48#define GBE_SM_REG_INDEX		2
  49/* offset relative to base of GBE_SS_REG_INDEX */
  50#define GBE13_SGMII_MODULE_OFFSET	0x100
  51/* offset relative to base of GBE_SM_REG_INDEX */
  52#define GBE13_HOST_PORT_OFFSET		0x34
  53#define GBE13_SLAVE_PORT_OFFSET		0x60
  54#define GBE13_EMAC_OFFSET		0x100
  55#define GBE13_SLAVE_PORT2_OFFSET	0x200
  56#define GBE13_HW_STATS_OFFSET		0x300
  57#define GBE13_CPTS_OFFSET		0x500
  58#define GBE13_ALE_OFFSET		0x600
  59#define GBE13_HOST_PORT_NUM		0
  60#define GBE13_NUM_ALE_ENTRIES		1024
  61
  62/* 1G Ethernet NU SS defines */
  63#define GBENU_MODULE_NAME		"netcp-gbenu"
  64#define GBE_SS_ID_NU			0x4ee6
  65#define GBE_SS_ID_2U			0x4ee8
  66
  67#define IS_SS_ID_MU(d) \
  68	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
  69	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
  70
  71#define IS_SS_ID_NU(d) \
  72	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
  73
 
 
 
 
 
  74#define GBENU_SS_REG_INDEX		0
  75#define GBENU_SM_REG_INDEX		1
  76#define GBENU_SGMII_MODULE_OFFSET	0x100
  77#define GBENU_HOST_PORT_OFFSET		0x1000
  78#define GBENU_SLAVE_PORT_OFFSET		0x2000
  79#define GBENU_EMAC_OFFSET		0x2330
  80#define GBENU_HW_STATS_OFFSET		0x1a000
  81#define GBENU_CPTS_OFFSET		0x1d000
  82#define GBENU_ALE_OFFSET		0x1e000
  83#define GBENU_HOST_PORT_NUM		0
  84#define GBENU_NUM_ALE_ENTRIES		1024
  85#define GBENU_SGMII_MODULE_SIZE		0x100
  86
  87/* 10G Ethernet SS defines */
  88#define XGBE_MODULE_NAME		"netcp-xgbe"
  89#define XGBE_SS_VERSION_10		0x4ee42100
  90
  91#define XGBE_SS_REG_INDEX		0
  92#define XGBE_SM_REG_INDEX		1
  93#define XGBE_SERDES_REG_INDEX		2
  94
  95/* offset relative to base of XGBE_SS_REG_INDEX */
  96#define XGBE10_SGMII_MODULE_OFFSET	0x100
  97#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
  98/* offset relative to base of XGBE_SM_REG_INDEX */
  99#define XGBE10_HOST_PORT_OFFSET		0x34
 100#define XGBE10_SLAVE_PORT_OFFSET	0x64
 101#define XGBE10_EMAC_OFFSET		0x400
 102#define XGBE10_CPTS_OFFSET		0x600
 103#define XGBE10_ALE_OFFSET		0x700
 104#define XGBE10_HW_STATS_OFFSET		0x800
 105#define XGBE10_HOST_PORT_NUM		0
 106#define XGBE10_NUM_ALE_ENTRIES		1024
 107
 108#define	GBE_TIMER_INTERVAL			(HZ / 2)
 109
 110/* Soft reset register values */
 111#define SOFT_RESET_MASK				BIT(0)
 112#define SOFT_RESET				BIT(0)
 113#define DEVICE_EMACSL_RESET_POLL_COUNT		100
 114#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
 115
 116#define MACSL_RX_ENABLE_CSF			BIT(23)
 117#define MACSL_ENABLE_EXT_CTL			BIT(18)
 118#define MACSL_XGMII_ENABLE			BIT(13)
 119#define MACSL_XGIG_MODE				BIT(8)
 120#define MACSL_GIG_MODE				BIT(7)
 121#define MACSL_GMII_ENABLE			BIT(5)
 122#define MACSL_FULLDUPLEX			BIT(0)
 123
 124#define GBE_CTL_P0_ENABLE			BIT(2)
 
 125#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
 126#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
 127#define GBE_STATS_CD_SEL			BIT(28)
 128
 129#define GBE_PORT_MASK(x)			(BIT(x) - 1)
 130#define GBE_MASK_NO_PORTS			0
 131
 132#define GBE_DEF_1G_MAC_CONTROL					\
 133		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
 134		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 135
 136#define GBE_DEF_10G_MAC_CONTROL				\
 137		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
 138		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 139
 140#define GBE_STATSA_MODULE			0
 141#define GBE_STATSB_MODULE			1
 142#define GBE_STATSC_MODULE			2
 143#define GBE_STATSD_MODULE			3
 144
 145#define GBENU_STATS0_MODULE			0
 146#define GBENU_STATS1_MODULE			1
 147#define GBENU_STATS2_MODULE			2
 148#define GBENU_STATS3_MODULE			3
 149#define GBENU_STATS4_MODULE			4
 150#define GBENU_STATS5_MODULE			5
 151#define GBENU_STATS6_MODULE			6
 152#define GBENU_STATS7_MODULE			7
 153#define GBENU_STATS8_MODULE			8
 154
 155#define XGBE_STATS0_MODULE			0
 156#define XGBE_STATS1_MODULE			1
 157#define XGBE_STATS2_MODULE			2
 158
 159/* s: 0-based slave_port */
 160#define SGMII_BASE(d, s) \
 161	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
 162
 163#define GBE_TX_QUEUE				648
 164#define	GBE_TXHOOK_ORDER			0
 165#define	GBE_RXHOOK_ORDER			0
 166#define GBE_DEFAULT_ALE_AGEOUT			30
 167#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
 
 
 
 
 
 168#define NETCP_LINK_STATE_INVALID		-1
 169
 170#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 171		offsetof(struct gbe##_##rb, rn)
 172#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 173		offsetof(struct gbenu##_##rb, rn)
 174#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 175		offsetof(struct xgbe##_##rb, rn)
 176#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
 177
 178#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
 179
 180#if IS_ENABLED(CONFIG_TI_CPTS)
 181/* Px_TS_CTL register fields */
 182#define TS_RX_ANX_F_EN				BIT(0)
 183#define TS_RX_VLAN_LT1_EN			BIT(1)
 184#define TS_RX_VLAN_LT2_EN			BIT(2)
 185#define TS_RX_ANX_D_EN				BIT(3)
 186#define TS_TX_ANX_F_EN				BIT(4)
 187#define TS_TX_VLAN_LT1_EN			BIT(5)
 188#define TS_TX_VLAN_LT2_EN			BIT(6)
 189#define TS_TX_ANX_D_EN				BIT(7)
 190#define TS_LT2_EN				BIT(8)
 191#define TS_RX_ANX_E_EN				BIT(9)
 192#define TS_TX_ANX_E_EN				BIT(10)
 193#define TS_MSG_TYPE_EN_SHIFT			16
 194#define TS_MSG_TYPE_EN_MASK			0xffff
 195
 196/* Px_TS_SEQ_LTYPE register fields */
 197#define TS_SEQ_ID_OFS_SHIFT			16
 198#define TS_SEQ_ID_OFS_MASK			0x3f
 199
 200/* Px_TS_CTL_LTYPE2 register fields */
 201#define TS_107					BIT(16)
 202#define TS_129					BIT(17)
 203#define TS_130					BIT(18)
 204#define TS_131					BIT(19)
 205#define TS_132					BIT(20)
 206#define TS_319					BIT(21)
 207#define TS_320					BIT(22)
 208#define TS_TTL_NONZERO				BIT(23)
 209#define TS_UNI_EN				BIT(24)
 210#define TS_UNI_EN_SHIFT				24
 211
 212#define TS_TX_ANX_ALL_EN	 \
 213	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
 214
 215#define TS_RX_ANX_ALL_EN	 \
 216	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
 217
 218#define TS_CTL_DST_PORT				TS_319
 219#define TS_CTL_DST_PORT_SHIFT			21
 220
 221#define TS_CTL_MADDR_ALL	\
 222	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
 223
 224#define TS_CTL_MADDR_SHIFT			16
 225
 226/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
 227#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 228#endif /* CONFIG_TI_CPTS */
 229
 230struct xgbe_ss_regs {
 231	u32	id_ver;
 232	u32	synce_count;
 233	u32	synce_mux;
 234	u32	control;
 235};
 236
 237struct xgbe_switch_regs {
 238	u32	id_ver;
 239	u32	control;
 240	u32	emcontrol;
 241	u32	stat_port_en;
 242	u32	ptype;
 243	u32	soft_idle;
 244	u32	thru_rate;
 245	u32	gap_thresh;
 246	u32	tx_start_wds;
 247	u32	flow_control;
 248	u32	cppi_thresh;
 249};
 250
 251struct xgbe_port_regs {
 252	u32	blk_cnt;
 253	u32	port_vlan;
 254	u32	tx_pri_map;
 255	u32	sa_lo;
 256	u32	sa_hi;
 257	u32	ts_ctl;
 258	u32	ts_seq_ltype;
 259	u32	ts_vlan;
 260	u32	ts_ctl_ltype2;
 261	u32	ts_ctl2;
 262	u32	control;
 263};
 264
 265struct xgbe_host_port_regs {
 266	u32	blk_cnt;
 267	u32	port_vlan;
 268	u32	tx_pri_map;
 269	u32	src_id;
 270	u32	rx_pri_map;
 271	u32	rx_maxlen;
 272};
 273
 274struct xgbe_emac_regs {
 275	u32	id_ver;
 276	u32	mac_control;
 277	u32	mac_status;
 278	u32	soft_reset;
 279	u32	rx_maxlen;
 280	u32	__reserved_0;
 281	u32	rx_pause;
 282	u32	tx_pause;
 283	u32	em_control;
 284	u32	__reserved_1;
 285	u32	tx_gap;
 286	u32	rsvd[4];
 287};
 288
 289struct xgbe_host_hw_stats {
 290	u32	rx_good_frames;
 291	u32	rx_broadcast_frames;
 292	u32	rx_multicast_frames;
 293	u32	__rsvd_0[3];
 294	u32	rx_oversized_frames;
 295	u32	__rsvd_1;
 296	u32	rx_undersized_frames;
 297	u32	__rsvd_2;
 298	u32	overrun_type4;
 299	u32	overrun_type5;
 300	u32	rx_bytes;
 301	u32	tx_good_frames;
 302	u32	tx_broadcast_frames;
 303	u32	tx_multicast_frames;
 304	u32	__rsvd_3[9];
 305	u32	tx_bytes;
 306	u32	tx_64byte_frames;
 307	u32	tx_65_to_127byte_frames;
 308	u32	tx_128_to_255byte_frames;
 309	u32	tx_256_to_511byte_frames;
 310	u32	tx_512_to_1023byte_frames;
 311	u32	tx_1024byte_frames;
 312	u32	net_bytes;
 313	u32	rx_sof_overruns;
 314	u32	rx_mof_overruns;
 315	u32	rx_dma_overruns;
 316};
 317
 318struct xgbe_hw_stats {
 319	u32	rx_good_frames;
 320	u32	rx_broadcast_frames;
 321	u32	rx_multicast_frames;
 322	u32	rx_pause_frames;
 323	u32	rx_crc_errors;
 324	u32	rx_align_code_errors;
 325	u32	rx_oversized_frames;
 326	u32	rx_jabber_frames;
 327	u32	rx_undersized_frames;
 328	u32	rx_fragments;
 329	u32	overrun_type4;
 330	u32	overrun_type5;
 331	u32	rx_bytes;
 332	u32	tx_good_frames;
 333	u32	tx_broadcast_frames;
 334	u32	tx_multicast_frames;
 335	u32	tx_pause_frames;
 336	u32	tx_deferred_frames;
 337	u32	tx_collision_frames;
 338	u32	tx_single_coll_frames;
 339	u32	tx_mult_coll_frames;
 340	u32	tx_excessive_collisions;
 341	u32	tx_late_collisions;
 342	u32	tx_underrun;
 343	u32	tx_carrier_sense_errors;
 344	u32	tx_bytes;
 345	u32	tx_64byte_frames;
 346	u32	tx_65_to_127byte_frames;
 347	u32	tx_128_to_255byte_frames;
 348	u32	tx_256_to_511byte_frames;
 349	u32	tx_512_to_1023byte_frames;
 350	u32	tx_1024byte_frames;
 351	u32	net_bytes;
 352	u32	rx_sof_overruns;
 353	u32	rx_mof_overruns;
 354	u32	rx_dma_overruns;
 355};
 356
 357struct gbenu_ss_regs {
 358	u32	id_ver;
 359	u32	synce_count;		/* NU */
 360	u32	synce_mux;		/* NU */
 361	u32	control;		/* 2U */
 362	u32	__rsvd_0[2];		/* 2U */
 363	u32	rgmii_status;		/* 2U */
 364	u32	ss_status;		/* 2U */
 365};
 366
 367struct gbenu_switch_regs {
 368	u32	id_ver;
 369	u32	control;
 370	u32	__rsvd_0[2];
 371	u32	emcontrol;
 372	u32	stat_port_en;
 373	u32	ptype;			/* NU */
 374	u32	soft_idle;
 375	u32	thru_rate;		/* NU */
 376	u32	gap_thresh;		/* NU */
 377	u32	tx_start_wds;		/* NU */
 378	u32	eee_prescale;		/* 2U */
 379	u32	tx_g_oflow_thresh_set;	/* NU */
 380	u32	tx_g_oflow_thresh_clr;	/* NU */
 381	u32	tx_g_buf_thresh_set_l;	/* NU */
 382	u32	tx_g_buf_thresh_set_h;	/* NU */
 383	u32	tx_g_buf_thresh_clr_l;	/* NU */
 384	u32	tx_g_buf_thresh_clr_h;	/* NU */
 385};
 386
 387struct gbenu_port_regs {
 388	u32	__rsvd_0;
 389	u32	control;
 390	u32	max_blks;		/* 2U */
 391	u32	mem_align1;
 392	u32	blk_cnt;
 393	u32	port_vlan;
 394	u32	tx_pri_map;		/* NU */
 395	u32	pri_ctl;		/* 2U */
 396	u32	rx_pri_map;
 397	u32	rx_maxlen;
 398	u32	tx_blks_pri;		/* NU */
 399	u32	__rsvd_1;
 400	u32	idle2lpi;		/* 2U */
 401	u32	lpi2idle;		/* 2U */
 402	u32	eee_status;		/* 2U */
 403	u32	__rsvd_2;
 404	u32	__rsvd_3[176];		/* NU: more to add */
 405	u32	__rsvd_4[2];
 406	u32	sa_lo;
 407	u32	sa_hi;
 408	u32	ts_ctl;
 409	u32	ts_seq_ltype;
 410	u32	ts_vlan;
 411	u32	ts_ctl_ltype2;
 412	u32	ts_ctl2;
 413};
 414
 415struct gbenu_host_port_regs {
 416	u32	__rsvd_0;
 417	u32	control;
 418	u32	flow_id_offset;		/* 2U */
 419	u32	__rsvd_1;
 420	u32	blk_cnt;
 421	u32	port_vlan;
 422	u32	tx_pri_map;		/* NU */
 423	u32	pri_ctl;
 424	u32	rx_pri_map;
 425	u32	rx_maxlen;
 426	u32	tx_blks_pri;		/* NU */
 427	u32	__rsvd_2;
 428	u32	idle2lpi;		/* 2U */
 429	u32	lpi2wake;		/* 2U */
 430	u32	eee_status;		/* 2U */
 431	u32	__rsvd_3;
 432	u32	__rsvd_4[184];		/* NU */
 433	u32	host_blks_pri;		/* NU */
 434};
 435
 436struct gbenu_emac_regs {
 437	u32	mac_control;
 438	u32	mac_status;
 439	u32	soft_reset;
 440	u32	boff_test;
 441	u32	rx_pause;
 442	u32	__rsvd_0[11];		/* NU */
 443	u32	tx_pause;
 444	u32	__rsvd_1[11];		/* NU */
 445	u32	em_control;
 446	u32	tx_gap;
 447};
 448
 449/* Some hw stat regs are applicable to slave port only.
 450 * This is handled by gbenu_et_stats struct.  Also some
 451 * are for SS version NU and some are for 2U.
 452 */
 453struct gbenu_hw_stats {
 454	u32	rx_good_frames;
 455	u32	rx_broadcast_frames;
 456	u32	rx_multicast_frames;
 457	u32	rx_pause_frames;		/* slave */
 458	u32	rx_crc_errors;
 459	u32	rx_align_code_errors;		/* slave */
 460	u32	rx_oversized_frames;
 461	u32	rx_jabber_frames;		/* slave */
 462	u32	rx_undersized_frames;
 463	u32	rx_fragments;			/* slave */
 464	u32	ale_drop;
 465	u32	ale_overrun_drop;
 466	u32	rx_bytes;
 467	u32	tx_good_frames;
 468	u32	tx_broadcast_frames;
 469	u32	tx_multicast_frames;
 470	u32	tx_pause_frames;		/* slave */
 471	u32	tx_deferred_frames;		/* slave */
 472	u32	tx_collision_frames;		/* slave */
 473	u32	tx_single_coll_frames;		/* slave */
 474	u32	tx_mult_coll_frames;		/* slave */
 475	u32	tx_excessive_collisions;	/* slave */
 476	u32	tx_late_collisions;		/* slave */
 477	u32	rx_ipg_error;			/* slave 10G only */
 478	u32	tx_carrier_sense_errors;	/* slave */
 479	u32	tx_bytes;
 480	u32	tx_64B_frames;
 481	u32	tx_65_to_127B_frames;
 482	u32	tx_128_to_255B_frames;
 483	u32	tx_256_to_511B_frames;
 484	u32	tx_512_to_1023B_frames;
 485	u32	tx_1024B_frames;
 486	u32	net_bytes;
 487	u32	rx_bottom_fifo_drop;
 488	u32	rx_port_mask_drop;
 489	u32	rx_top_fifo_drop;
 490	u32	ale_rate_limit_drop;
 491	u32	ale_vid_ingress_drop;
 492	u32	ale_da_eq_sa_drop;
 493	u32	__rsvd_0[3];
 494	u32	ale_unknown_ucast;
 495	u32	ale_unknown_ucast_bytes;
 496	u32	ale_unknown_mcast;
 497	u32	ale_unknown_mcast_bytes;
 498	u32	ale_unknown_bcast;
 499	u32	ale_unknown_bcast_bytes;
 500	u32	ale_pol_match;
 501	u32	ale_pol_match_red;		/* NU */
 502	u32	ale_pol_match_yellow;		/* NU */
 503	u32	__rsvd_1[44];
 504	u32	tx_mem_protect_err;
 505	/* following NU only */
 506	u32	tx_pri0;
 507	u32	tx_pri1;
 508	u32	tx_pri2;
 509	u32	tx_pri3;
 510	u32	tx_pri4;
 511	u32	tx_pri5;
 512	u32	tx_pri6;
 513	u32	tx_pri7;
 514	u32	tx_pri0_bcnt;
 515	u32	tx_pri1_bcnt;
 516	u32	tx_pri2_bcnt;
 517	u32	tx_pri3_bcnt;
 518	u32	tx_pri4_bcnt;
 519	u32	tx_pri5_bcnt;
 520	u32	tx_pri6_bcnt;
 521	u32	tx_pri7_bcnt;
 522	u32	tx_pri0_drop;
 523	u32	tx_pri1_drop;
 524	u32	tx_pri2_drop;
 525	u32	tx_pri3_drop;
 526	u32	tx_pri4_drop;
 527	u32	tx_pri5_drop;
 528	u32	tx_pri6_drop;
 529	u32	tx_pri7_drop;
 530	u32	tx_pri0_drop_bcnt;
 531	u32	tx_pri1_drop_bcnt;
 532	u32	tx_pri2_drop_bcnt;
 533	u32	tx_pri3_drop_bcnt;
 534	u32	tx_pri4_drop_bcnt;
 535	u32	tx_pri5_drop_bcnt;
 536	u32	tx_pri6_drop_bcnt;
 537	u32	tx_pri7_drop_bcnt;
 538};
 539
 540#define GBENU_HW_STATS_REG_MAP_SZ	0x200
 541
 542struct gbe_ss_regs {
 543	u32	id_ver;
 544	u32	synce_count;
 545	u32	synce_mux;
 546};
 547
 548struct gbe_ss_regs_ofs {
 549	u16	id_ver;
 550	u16	control;
 
 551};
 552
 553struct gbe_switch_regs {
 554	u32	id_ver;
 555	u32	control;
 556	u32	soft_reset;
 557	u32	stat_port_en;
 558	u32	ptype;
 559	u32	soft_idle;
 560	u32	thru_rate;
 561	u32	gap_thresh;
 562	u32	tx_start_wds;
 563	u32	flow_control;
 564};
 565
 566struct gbe_switch_regs_ofs {
 567	u16	id_ver;
 568	u16	control;
 569	u16	soft_reset;
 570	u16	emcontrol;
 571	u16	stat_port_en;
 572	u16	ptype;
 573	u16	flow_control;
 574};
 575
 576struct gbe_port_regs {
 577	u32	max_blks;
 578	u32	blk_cnt;
 579	u32	port_vlan;
 580	u32	tx_pri_map;
 581	u32	sa_lo;
 582	u32	sa_hi;
 583	u32	ts_ctl;
 584	u32	ts_seq_ltype;
 585	u32	ts_vlan;
 586	u32	ts_ctl_ltype2;
 587	u32	ts_ctl2;
 588};
 589
 590struct gbe_port_regs_ofs {
 591	u16	port_vlan;
 592	u16	tx_pri_map;
 
 593	u16	sa_lo;
 594	u16	sa_hi;
 595	u16	ts_ctl;
 596	u16	ts_seq_ltype;
 597	u16	ts_vlan;
 598	u16	ts_ctl_ltype2;
 599	u16	ts_ctl2;
 600	u16	rx_maxlen;	/* 2U, NU */
 601};
 602
 603struct gbe_host_port_regs {
 604	u32	src_id;
 605	u32	port_vlan;
 606	u32	rx_pri_map;
 607	u32	rx_maxlen;
 608};
 609
 610struct gbe_host_port_regs_ofs {
 611	u16	port_vlan;
 612	u16	tx_pri_map;
 613	u16	rx_maxlen;
 614};
 615
 616struct gbe_emac_regs {
 617	u32	id_ver;
 618	u32	mac_control;
 619	u32	mac_status;
 620	u32	soft_reset;
 621	u32	rx_maxlen;
 622	u32	__reserved_0;
 623	u32	rx_pause;
 624	u32	tx_pause;
 625	u32	__reserved_1;
 626	u32	rx_pri_map;
 627	u32	rsvd[6];
 628};
 629
 630struct gbe_emac_regs_ofs {
 631	u16	mac_control;
 632	u16	soft_reset;
 633	u16	rx_maxlen;
 634};
 635
 636struct gbe_hw_stats {
 637	u32	rx_good_frames;
 638	u32	rx_broadcast_frames;
 639	u32	rx_multicast_frames;
 640	u32	rx_pause_frames;
 641	u32	rx_crc_errors;
 642	u32	rx_align_code_errors;
 643	u32	rx_oversized_frames;
 644	u32	rx_jabber_frames;
 645	u32	rx_undersized_frames;
 646	u32	rx_fragments;
 647	u32	__pad_0[2];
 648	u32	rx_bytes;
 649	u32	tx_good_frames;
 650	u32	tx_broadcast_frames;
 651	u32	tx_multicast_frames;
 652	u32	tx_pause_frames;
 653	u32	tx_deferred_frames;
 654	u32	tx_collision_frames;
 655	u32	tx_single_coll_frames;
 656	u32	tx_mult_coll_frames;
 657	u32	tx_excessive_collisions;
 658	u32	tx_late_collisions;
 659	u32	tx_underrun;
 660	u32	tx_carrier_sense_errors;
 661	u32	tx_bytes;
 662	u32	tx_64byte_frames;
 663	u32	tx_65_to_127byte_frames;
 664	u32	tx_128_to_255byte_frames;
 665	u32	tx_256_to_511byte_frames;
 666	u32	tx_512_to_1023byte_frames;
 667	u32	tx_1024byte_frames;
 668	u32	net_bytes;
 669	u32	rx_sof_overruns;
 670	u32	rx_mof_overruns;
 671	u32	rx_dma_overruns;
 672};
 673
 674#define GBE_MAX_HW_STAT_MODS			9
 675#define GBE_HW_STATS_REG_MAP_SZ			0x100
 676
 677struct ts_ctl {
 678	int     uni;
 679	u8      dst_port_map;
 680	u8      maddr_map;
 681	u8      ts_mcast_type;
 682};
 683
 684struct gbe_slave {
 685	void __iomem			*port_regs;
 686	void __iomem			*emac_regs;
 687	struct gbe_port_regs_ofs	port_regs_ofs;
 688	struct gbe_emac_regs_ofs	emac_regs_ofs;
 689	int				slave_num; /* 0 based logical number */
 690	int				port_num;  /* actual port number */
 691	atomic_t			link_state;
 692	bool				open;
 693	struct phy_device		*phy;
 694	u32				link_interface;
 695	u32				mac_control;
 696	u8				phy_port_t;
 
 697	struct device_node		*phy_node;
 698	struct ts_ctl                   ts_ctl;
 699	struct list_head		slave_list;
 700};
 701
 702struct gbe_priv {
 703	struct device			*dev;
 704	struct netcp_device		*netcp_device;
 705	struct timer_list		timer;
 706	u32				num_slaves;
 707	u32				ale_entries;
 708	u32				ale_ports;
 709	bool				enable_ale;
 710	u8				max_num_slaves;
 711	u8				max_num_ports; /* max_num_slaves + 1 */
 712	u8				num_stats_mods;
 713	struct netcp_tx_pipe		tx_pipe;
 714
 715	int				host_port;
 716	u32				rx_packet_max;
 717	u32				ss_version;
 718	u32				stats_en_mask;
 719
 720	void __iomem			*ss_regs;
 721	void __iomem			*switch_regs;
 722	void __iomem			*host_port_regs;
 723	void __iomem			*ale_reg;
 724	void __iomem                    *cpts_reg;
 725	void __iomem			*sgmii_port_regs;
 726	void __iomem			*sgmii_port34_regs;
 727	void __iomem			*xgbe_serdes_regs;
 728	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
 729
 730	struct gbe_ss_regs_ofs		ss_regs_ofs;
 731	struct gbe_switch_regs_ofs	switch_regs_ofs;
 732	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
 733
 734	struct cpsw_ale			*ale;
 735	unsigned int			tx_queue_id;
 736	const char			*dma_chan_name;
 737
 738	struct list_head		gbe_intf_head;
 739	struct list_head		secondary_slaves;
 740	struct net_device		*dummy_ndev;
 741
 742	u64				*hw_stats;
 743	u32				*hw_stats_prev;
 744	const struct netcp_ethtool_stat *et_stats;
 745	int				num_et_stats;
 746	/*  Lock for updating the hwstats */
 747	spinlock_t			hw_stats_lock;
 748
 749	int                             cpts_registered;
 750	struct cpts                     *cpts;
 
 
 751};
 752
 753struct gbe_intf {
 754	struct net_device	*ndev;
 755	struct device		*dev;
 756	struct gbe_priv		*gbe_dev;
 757	struct netcp_tx_pipe	tx_pipe;
 758	struct gbe_slave	*slave;
 759	struct list_head	gbe_intf_list;
 760	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 761};
 762
 763static struct netcp_module gbe_module;
 764static struct netcp_module xgbe_module;
 765
 766/* Statistic management */
 767struct netcp_ethtool_stat {
 768	char desc[ETH_GSTRING_LEN];
 769	int type;
 770	u32 size;
 771	int offset;
 772};
 773
 774#define GBE_STATSA_INFO(field)						\
 775{									\
 776	"GBE_A:"#field, GBE_STATSA_MODULE,				\
 777	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 778	offsetof(struct gbe_hw_stats, field)				\
 779}
 780
 781#define GBE_STATSB_INFO(field)						\
 782{									\
 783	"GBE_B:"#field, GBE_STATSB_MODULE,				\
 784	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 785	offsetof(struct gbe_hw_stats, field)				\
 786}
 787
 788#define GBE_STATSC_INFO(field)						\
 789{									\
 790	"GBE_C:"#field, GBE_STATSC_MODULE,				\
 791	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 792	offsetof(struct gbe_hw_stats, field)				\
 793}
 794
 795#define GBE_STATSD_INFO(field)						\
 796{									\
 797	"GBE_D:"#field, GBE_STATSD_MODULE,				\
 798	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 799	offsetof(struct gbe_hw_stats, field)				\
 800}
 801
 802static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 803	/* GBE module A */
 804	GBE_STATSA_INFO(rx_good_frames),
 805	GBE_STATSA_INFO(rx_broadcast_frames),
 806	GBE_STATSA_INFO(rx_multicast_frames),
 807	GBE_STATSA_INFO(rx_pause_frames),
 808	GBE_STATSA_INFO(rx_crc_errors),
 809	GBE_STATSA_INFO(rx_align_code_errors),
 810	GBE_STATSA_INFO(rx_oversized_frames),
 811	GBE_STATSA_INFO(rx_jabber_frames),
 812	GBE_STATSA_INFO(rx_undersized_frames),
 813	GBE_STATSA_INFO(rx_fragments),
 814	GBE_STATSA_INFO(rx_bytes),
 815	GBE_STATSA_INFO(tx_good_frames),
 816	GBE_STATSA_INFO(tx_broadcast_frames),
 817	GBE_STATSA_INFO(tx_multicast_frames),
 818	GBE_STATSA_INFO(tx_pause_frames),
 819	GBE_STATSA_INFO(tx_deferred_frames),
 820	GBE_STATSA_INFO(tx_collision_frames),
 821	GBE_STATSA_INFO(tx_single_coll_frames),
 822	GBE_STATSA_INFO(tx_mult_coll_frames),
 823	GBE_STATSA_INFO(tx_excessive_collisions),
 824	GBE_STATSA_INFO(tx_late_collisions),
 825	GBE_STATSA_INFO(tx_underrun),
 826	GBE_STATSA_INFO(tx_carrier_sense_errors),
 827	GBE_STATSA_INFO(tx_bytes),
 828	GBE_STATSA_INFO(tx_64byte_frames),
 829	GBE_STATSA_INFO(tx_65_to_127byte_frames),
 830	GBE_STATSA_INFO(tx_128_to_255byte_frames),
 831	GBE_STATSA_INFO(tx_256_to_511byte_frames),
 832	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
 833	GBE_STATSA_INFO(tx_1024byte_frames),
 834	GBE_STATSA_INFO(net_bytes),
 835	GBE_STATSA_INFO(rx_sof_overruns),
 836	GBE_STATSA_INFO(rx_mof_overruns),
 837	GBE_STATSA_INFO(rx_dma_overruns),
 838	/* GBE module B */
 839	GBE_STATSB_INFO(rx_good_frames),
 840	GBE_STATSB_INFO(rx_broadcast_frames),
 841	GBE_STATSB_INFO(rx_multicast_frames),
 842	GBE_STATSB_INFO(rx_pause_frames),
 843	GBE_STATSB_INFO(rx_crc_errors),
 844	GBE_STATSB_INFO(rx_align_code_errors),
 845	GBE_STATSB_INFO(rx_oversized_frames),
 846	GBE_STATSB_INFO(rx_jabber_frames),
 847	GBE_STATSB_INFO(rx_undersized_frames),
 848	GBE_STATSB_INFO(rx_fragments),
 849	GBE_STATSB_INFO(rx_bytes),
 850	GBE_STATSB_INFO(tx_good_frames),
 851	GBE_STATSB_INFO(tx_broadcast_frames),
 852	GBE_STATSB_INFO(tx_multicast_frames),
 853	GBE_STATSB_INFO(tx_pause_frames),
 854	GBE_STATSB_INFO(tx_deferred_frames),
 855	GBE_STATSB_INFO(tx_collision_frames),
 856	GBE_STATSB_INFO(tx_single_coll_frames),
 857	GBE_STATSB_INFO(tx_mult_coll_frames),
 858	GBE_STATSB_INFO(tx_excessive_collisions),
 859	GBE_STATSB_INFO(tx_late_collisions),
 860	GBE_STATSB_INFO(tx_underrun),
 861	GBE_STATSB_INFO(tx_carrier_sense_errors),
 862	GBE_STATSB_INFO(tx_bytes),
 863	GBE_STATSB_INFO(tx_64byte_frames),
 864	GBE_STATSB_INFO(tx_65_to_127byte_frames),
 865	GBE_STATSB_INFO(tx_128_to_255byte_frames),
 866	GBE_STATSB_INFO(tx_256_to_511byte_frames),
 867	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
 868	GBE_STATSB_INFO(tx_1024byte_frames),
 869	GBE_STATSB_INFO(net_bytes),
 870	GBE_STATSB_INFO(rx_sof_overruns),
 871	GBE_STATSB_INFO(rx_mof_overruns),
 872	GBE_STATSB_INFO(rx_dma_overruns),
 873	/* GBE module C */
 874	GBE_STATSC_INFO(rx_good_frames),
 875	GBE_STATSC_INFO(rx_broadcast_frames),
 876	GBE_STATSC_INFO(rx_multicast_frames),
 877	GBE_STATSC_INFO(rx_pause_frames),
 878	GBE_STATSC_INFO(rx_crc_errors),
 879	GBE_STATSC_INFO(rx_align_code_errors),
 880	GBE_STATSC_INFO(rx_oversized_frames),
 881	GBE_STATSC_INFO(rx_jabber_frames),
 882	GBE_STATSC_INFO(rx_undersized_frames),
 883	GBE_STATSC_INFO(rx_fragments),
 884	GBE_STATSC_INFO(rx_bytes),
 885	GBE_STATSC_INFO(tx_good_frames),
 886	GBE_STATSC_INFO(tx_broadcast_frames),
 887	GBE_STATSC_INFO(tx_multicast_frames),
 888	GBE_STATSC_INFO(tx_pause_frames),
 889	GBE_STATSC_INFO(tx_deferred_frames),
 890	GBE_STATSC_INFO(tx_collision_frames),
 891	GBE_STATSC_INFO(tx_single_coll_frames),
 892	GBE_STATSC_INFO(tx_mult_coll_frames),
 893	GBE_STATSC_INFO(tx_excessive_collisions),
 894	GBE_STATSC_INFO(tx_late_collisions),
 895	GBE_STATSC_INFO(tx_underrun),
 896	GBE_STATSC_INFO(tx_carrier_sense_errors),
 897	GBE_STATSC_INFO(tx_bytes),
 898	GBE_STATSC_INFO(tx_64byte_frames),
 899	GBE_STATSC_INFO(tx_65_to_127byte_frames),
 900	GBE_STATSC_INFO(tx_128_to_255byte_frames),
 901	GBE_STATSC_INFO(tx_256_to_511byte_frames),
 902	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
 903	GBE_STATSC_INFO(tx_1024byte_frames),
 904	GBE_STATSC_INFO(net_bytes),
 905	GBE_STATSC_INFO(rx_sof_overruns),
 906	GBE_STATSC_INFO(rx_mof_overruns),
 907	GBE_STATSC_INFO(rx_dma_overruns),
 908	/* GBE module D */
 909	GBE_STATSD_INFO(rx_good_frames),
 910	GBE_STATSD_INFO(rx_broadcast_frames),
 911	GBE_STATSD_INFO(rx_multicast_frames),
 912	GBE_STATSD_INFO(rx_pause_frames),
 913	GBE_STATSD_INFO(rx_crc_errors),
 914	GBE_STATSD_INFO(rx_align_code_errors),
 915	GBE_STATSD_INFO(rx_oversized_frames),
 916	GBE_STATSD_INFO(rx_jabber_frames),
 917	GBE_STATSD_INFO(rx_undersized_frames),
 918	GBE_STATSD_INFO(rx_fragments),
 919	GBE_STATSD_INFO(rx_bytes),
 920	GBE_STATSD_INFO(tx_good_frames),
 921	GBE_STATSD_INFO(tx_broadcast_frames),
 922	GBE_STATSD_INFO(tx_multicast_frames),
 923	GBE_STATSD_INFO(tx_pause_frames),
 924	GBE_STATSD_INFO(tx_deferred_frames),
 925	GBE_STATSD_INFO(tx_collision_frames),
 926	GBE_STATSD_INFO(tx_single_coll_frames),
 927	GBE_STATSD_INFO(tx_mult_coll_frames),
 928	GBE_STATSD_INFO(tx_excessive_collisions),
 929	GBE_STATSD_INFO(tx_late_collisions),
 930	GBE_STATSD_INFO(tx_underrun),
 931	GBE_STATSD_INFO(tx_carrier_sense_errors),
 932	GBE_STATSD_INFO(tx_bytes),
 933	GBE_STATSD_INFO(tx_64byte_frames),
 934	GBE_STATSD_INFO(tx_65_to_127byte_frames),
 935	GBE_STATSD_INFO(tx_128_to_255byte_frames),
 936	GBE_STATSD_INFO(tx_256_to_511byte_frames),
 937	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
 938	GBE_STATSD_INFO(tx_1024byte_frames),
 939	GBE_STATSD_INFO(net_bytes),
 940	GBE_STATSD_INFO(rx_sof_overruns),
 941	GBE_STATSD_INFO(rx_mof_overruns),
 942	GBE_STATSD_INFO(rx_dma_overruns),
 943};
 944
 945/* This is the size of entries in GBENU_STATS_HOST */
 946#define GBENU_ET_STATS_HOST_SIZE	52
 947
 948#define GBENU_STATS_HOST(field)					\
 949{								\
 950	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
 951	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 952	offsetof(struct gbenu_hw_stats, field)			\
 953}
 954
 955/* This is the size of entries in GBENU_STATS_PORT */
 956#define GBENU_ET_STATS_PORT_SIZE	65
 957
 958#define GBENU_STATS_P1(field)					\
 959{								\
 960	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
 961	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 962	offsetof(struct gbenu_hw_stats, field)			\
 963}
 964
 965#define GBENU_STATS_P2(field)					\
 966{								\
 967	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
 968	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 969	offsetof(struct gbenu_hw_stats, field)			\
 970}
 971
 972#define GBENU_STATS_P3(field)					\
 973{								\
 974	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
 975	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 976	offsetof(struct gbenu_hw_stats, field)			\
 977}
 978
 979#define GBENU_STATS_P4(field)					\
 980{								\
 981	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
 982	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 983	offsetof(struct gbenu_hw_stats, field)			\
 984}
 985
 986#define GBENU_STATS_P5(field)					\
 987{								\
 988	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
 989	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 990	offsetof(struct gbenu_hw_stats, field)			\
 991}
 992
 993#define GBENU_STATS_P6(field)					\
 994{								\
 995	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
 996	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 997	offsetof(struct gbenu_hw_stats, field)			\
 998}
 999
1000#define GBENU_STATS_P7(field)					\
1001{								\
1002	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1003	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1004	offsetof(struct gbenu_hw_stats, field)			\
1005}
1006
1007#define GBENU_STATS_P8(field)					\
1008{								\
1009	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1010	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1011	offsetof(struct gbenu_hw_stats, field)			\
1012}
1013
1014static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1015	/* GBENU Host Module */
1016	GBENU_STATS_HOST(rx_good_frames),
1017	GBENU_STATS_HOST(rx_broadcast_frames),
1018	GBENU_STATS_HOST(rx_multicast_frames),
1019	GBENU_STATS_HOST(rx_crc_errors),
1020	GBENU_STATS_HOST(rx_oversized_frames),
1021	GBENU_STATS_HOST(rx_undersized_frames),
1022	GBENU_STATS_HOST(ale_drop),
1023	GBENU_STATS_HOST(ale_overrun_drop),
1024	GBENU_STATS_HOST(rx_bytes),
1025	GBENU_STATS_HOST(tx_good_frames),
1026	GBENU_STATS_HOST(tx_broadcast_frames),
1027	GBENU_STATS_HOST(tx_multicast_frames),
1028	GBENU_STATS_HOST(tx_bytes),
1029	GBENU_STATS_HOST(tx_64B_frames),
1030	GBENU_STATS_HOST(tx_65_to_127B_frames),
1031	GBENU_STATS_HOST(tx_128_to_255B_frames),
1032	GBENU_STATS_HOST(tx_256_to_511B_frames),
1033	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1034	GBENU_STATS_HOST(tx_1024B_frames),
1035	GBENU_STATS_HOST(net_bytes),
1036	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1037	GBENU_STATS_HOST(rx_port_mask_drop),
1038	GBENU_STATS_HOST(rx_top_fifo_drop),
1039	GBENU_STATS_HOST(ale_rate_limit_drop),
1040	GBENU_STATS_HOST(ale_vid_ingress_drop),
1041	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1042	GBENU_STATS_HOST(ale_unknown_ucast),
1043	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1044	GBENU_STATS_HOST(ale_unknown_mcast),
1045	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1046	GBENU_STATS_HOST(ale_unknown_bcast),
1047	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1048	GBENU_STATS_HOST(ale_pol_match),
1049	GBENU_STATS_HOST(ale_pol_match_red),
1050	GBENU_STATS_HOST(ale_pol_match_yellow),
1051	GBENU_STATS_HOST(tx_mem_protect_err),
1052	GBENU_STATS_HOST(tx_pri0_drop),
1053	GBENU_STATS_HOST(tx_pri1_drop),
1054	GBENU_STATS_HOST(tx_pri2_drop),
1055	GBENU_STATS_HOST(tx_pri3_drop),
1056	GBENU_STATS_HOST(tx_pri4_drop),
1057	GBENU_STATS_HOST(tx_pri5_drop),
1058	GBENU_STATS_HOST(tx_pri6_drop),
1059	GBENU_STATS_HOST(tx_pri7_drop),
1060	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1061	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1062	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1063	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1064	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1065	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1066	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1067	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1068	/* GBENU Module 1 */
1069	GBENU_STATS_P1(rx_good_frames),
1070	GBENU_STATS_P1(rx_broadcast_frames),
1071	GBENU_STATS_P1(rx_multicast_frames),
1072	GBENU_STATS_P1(rx_pause_frames),
1073	GBENU_STATS_P1(rx_crc_errors),
1074	GBENU_STATS_P1(rx_align_code_errors),
1075	GBENU_STATS_P1(rx_oversized_frames),
1076	GBENU_STATS_P1(rx_jabber_frames),
1077	GBENU_STATS_P1(rx_undersized_frames),
1078	GBENU_STATS_P1(rx_fragments),
1079	GBENU_STATS_P1(ale_drop),
1080	GBENU_STATS_P1(ale_overrun_drop),
1081	GBENU_STATS_P1(rx_bytes),
1082	GBENU_STATS_P1(tx_good_frames),
1083	GBENU_STATS_P1(tx_broadcast_frames),
1084	GBENU_STATS_P1(tx_multicast_frames),
1085	GBENU_STATS_P1(tx_pause_frames),
1086	GBENU_STATS_P1(tx_deferred_frames),
1087	GBENU_STATS_P1(tx_collision_frames),
1088	GBENU_STATS_P1(tx_single_coll_frames),
1089	GBENU_STATS_P1(tx_mult_coll_frames),
1090	GBENU_STATS_P1(tx_excessive_collisions),
1091	GBENU_STATS_P1(tx_late_collisions),
1092	GBENU_STATS_P1(rx_ipg_error),
1093	GBENU_STATS_P1(tx_carrier_sense_errors),
1094	GBENU_STATS_P1(tx_bytes),
1095	GBENU_STATS_P1(tx_64B_frames),
1096	GBENU_STATS_P1(tx_65_to_127B_frames),
1097	GBENU_STATS_P1(tx_128_to_255B_frames),
1098	GBENU_STATS_P1(tx_256_to_511B_frames),
1099	GBENU_STATS_P1(tx_512_to_1023B_frames),
1100	GBENU_STATS_P1(tx_1024B_frames),
1101	GBENU_STATS_P1(net_bytes),
1102	GBENU_STATS_P1(rx_bottom_fifo_drop),
1103	GBENU_STATS_P1(rx_port_mask_drop),
1104	GBENU_STATS_P1(rx_top_fifo_drop),
1105	GBENU_STATS_P1(ale_rate_limit_drop),
1106	GBENU_STATS_P1(ale_vid_ingress_drop),
1107	GBENU_STATS_P1(ale_da_eq_sa_drop),
1108	GBENU_STATS_P1(ale_unknown_ucast),
1109	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1110	GBENU_STATS_P1(ale_unknown_mcast),
1111	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1112	GBENU_STATS_P1(ale_unknown_bcast),
1113	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1114	GBENU_STATS_P1(ale_pol_match),
1115	GBENU_STATS_P1(ale_pol_match_red),
1116	GBENU_STATS_P1(ale_pol_match_yellow),
1117	GBENU_STATS_P1(tx_mem_protect_err),
1118	GBENU_STATS_P1(tx_pri0_drop),
1119	GBENU_STATS_P1(tx_pri1_drop),
1120	GBENU_STATS_P1(tx_pri2_drop),
1121	GBENU_STATS_P1(tx_pri3_drop),
1122	GBENU_STATS_P1(tx_pri4_drop),
1123	GBENU_STATS_P1(tx_pri5_drop),
1124	GBENU_STATS_P1(tx_pri6_drop),
1125	GBENU_STATS_P1(tx_pri7_drop),
1126	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1127	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1128	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1129	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1130	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1131	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1132	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1133	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1134	/* GBENU Module 2 */
1135	GBENU_STATS_P2(rx_good_frames),
1136	GBENU_STATS_P2(rx_broadcast_frames),
1137	GBENU_STATS_P2(rx_multicast_frames),
1138	GBENU_STATS_P2(rx_pause_frames),
1139	GBENU_STATS_P2(rx_crc_errors),
1140	GBENU_STATS_P2(rx_align_code_errors),
1141	GBENU_STATS_P2(rx_oversized_frames),
1142	GBENU_STATS_P2(rx_jabber_frames),
1143	GBENU_STATS_P2(rx_undersized_frames),
1144	GBENU_STATS_P2(rx_fragments),
1145	GBENU_STATS_P2(ale_drop),
1146	GBENU_STATS_P2(ale_overrun_drop),
1147	GBENU_STATS_P2(rx_bytes),
1148	GBENU_STATS_P2(tx_good_frames),
1149	GBENU_STATS_P2(tx_broadcast_frames),
1150	GBENU_STATS_P2(tx_multicast_frames),
1151	GBENU_STATS_P2(tx_pause_frames),
1152	GBENU_STATS_P2(tx_deferred_frames),
1153	GBENU_STATS_P2(tx_collision_frames),
1154	GBENU_STATS_P2(tx_single_coll_frames),
1155	GBENU_STATS_P2(tx_mult_coll_frames),
1156	GBENU_STATS_P2(tx_excessive_collisions),
1157	GBENU_STATS_P2(tx_late_collisions),
1158	GBENU_STATS_P2(rx_ipg_error),
1159	GBENU_STATS_P2(tx_carrier_sense_errors),
1160	GBENU_STATS_P2(tx_bytes),
1161	GBENU_STATS_P2(tx_64B_frames),
1162	GBENU_STATS_P2(tx_65_to_127B_frames),
1163	GBENU_STATS_P2(tx_128_to_255B_frames),
1164	GBENU_STATS_P2(tx_256_to_511B_frames),
1165	GBENU_STATS_P2(tx_512_to_1023B_frames),
1166	GBENU_STATS_P2(tx_1024B_frames),
1167	GBENU_STATS_P2(net_bytes),
1168	GBENU_STATS_P2(rx_bottom_fifo_drop),
1169	GBENU_STATS_P2(rx_port_mask_drop),
1170	GBENU_STATS_P2(rx_top_fifo_drop),
1171	GBENU_STATS_P2(ale_rate_limit_drop),
1172	GBENU_STATS_P2(ale_vid_ingress_drop),
1173	GBENU_STATS_P2(ale_da_eq_sa_drop),
1174	GBENU_STATS_P2(ale_unknown_ucast),
1175	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1176	GBENU_STATS_P2(ale_unknown_mcast),
1177	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1178	GBENU_STATS_P2(ale_unknown_bcast),
1179	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1180	GBENU_STATS_P2(ale_pol_match),
1181	GBENU_STATS_P2(ale_pol_match_red),
1182	GBENU_STATS_P2(ale_pol_match_yellow),
1183	GBENU_STATS_P2(tx_mem_protect_err),
1184	GBENU_STATS_P2(tx_pri0_drop),
1185	GBENU_STATS_P2(tx_pri1_drop),
1186	GBENU_STATS_P2(tx_pri2_drop),
1187	GBENU_STATS_P2(tx_pri3_drop),
1188	GBENU_STATS_P2(tx_pri4_drop),
1189	GBENU_STATS_P2(tx_pri5_drop),
1190	GBENU_STATS_P2(tx_pri6_drop),
1191	GBENU_STATS_P2(tx_pri7_drop),
1192	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1193	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1194	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1195	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1196	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1197	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1198	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1199	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1200	/* GBENU Module 3 */
1201	GBENU_STATS_P3(rx_good_frames),
1202	GBENU_STATS_P3(rx_broadcast_frames),
1203	GBENU_STATS_P3(rx_multicast_frames),
1204	GBENU_STATS_P3(rx_pause_frames),
1205	GBENU_STATS_P3(rx_crc_errors),
1206	GBENU_STATS_P3(rx_align_code_errors),
1207	GBENU_STATS_P3(rx_oversized_frames),
1208	GBENU_STATS_P3(rx_jabber_frames),
1209	GBENU_STATS_P3(rx_undersized_frames),
1210	GBENU_STATS_P3(rx_fragments),
1211	GBENU_STATS_P3(ale_drop),
1212	GBENU_STATS_P3(ale_overrun_drop),
1213	GBENU_STATS_P3(rx_bytes),
1214	GBENU_STATS_P3(tx_good_frames),
1215	GBENU_STATS_P3(tx_broadcast_frames),
1216	GBENU_STATS_P3(tx_multicast_frames),
1217	GBENU_STATS_P3(tx_pause_frames),
1218	GBENU_STATS_P3(tx_deferred_frames),
1219	GBENU_STATS_P3(tx_collision_frames),
1220	GBENU_STATS_P3(tx_single_coll_frames),
1221	GBENU_STATS_P3(tx_mult_coll_frames),
1222	GBENU_STATS_P3(tx_excessive_collisions),
1223	GBENU_STATS_P3(tx_late_collisions),
1224	GBENU_STATS_P3(rx_ipg_error),
1225	GBENU_STATS_P3(tx_carrier_sense_errors),
1226	GBENU_STATS_P3(tx_bytes),
1227	GBENU_STATS_P3(tx_64B_frames),
1228	GBENU_STATS_P3(tx_65_to_127B_frames),
1229	GBENU_STATS_P3(tx_128_to_255B_frames),
1230	GBENU_STATS_P3(tx_256_to_511B_frames),
1231	GBENU_STATS_P3(tx_512_to_1023B_frames),
1232	GBENU_STATS_P3(tx_1024B_frames),
1233	GBENU_STATS_P3(net_bytes),
1234	GBENU_STATS_P3(rx_bottom_fifo_drop),
1235	GBENU_STATS_P3(rx_port_mask_drop),
1236	GBENU_STATS_P3(rx_top_fifo_drop),
1237	GBENU_STATS_P3(ale_rate_limit_drop),
1238	GBENU_STATS_P3(ale_vid_ingress_drop),
1239	GBENU_STATS_P3(ale_da_eq_sa_drop),
1240	GBENU_STATS_P3(ale_unknown_ucast),
1241	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1242	GBENU_STATS_P3(ale_unknown_mcast),
1243	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1244	GBENU_STATS_P3(ale_unknown_bcast),
1245	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1246	GBENU_STATS_P3(ale_pol_match),
1247	GBENU_STATS_P3(ale_pol_match_red),
1248	GBENU_STATS_P3(ale_pol_match_yellow),
1249	GBENU_STATS_P3(tx_mem_protect_err),
1250	GBENU_STATS_P3(tx_pri0_drop),
1251	GBENU_STATS_P3(tx_pri1_drop),
1252	GBENU_STATS_P3(tx_pri2_drop),
1253	GBENU_STATS_P3(tx_pri3_drop),
1254	GBENU_STATS_P3(tx_pri4_drop),
1255	GBENU_STATS_P3(tx_pri5_drop),
1256	GBENU_STATS_P3(tx_pri6_drop),
1257	GBENU_STATS_P3(tx_pri7_drop),
1258	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1259	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1260	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1261	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1262	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1263	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1264	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1265	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1266	/* GBENU Module 4 */
1267	GBENU_STATS_P4(rx_good_frames),
1268	GBENU_STATS_P4(rx_broadcast_frames),
1269	GBENU_STATS_P4(rx_multicast_frames),
1270	GBENU_STATS_P4(rx_pause_frames),
1271	GBENU_STATS_P4(rx_crc_errors),
1272	GBENU_STATS_P4(rx_align_code_errors),
1273	GBENU_STATS_P4(rx_oversized_frames),
1274	GBENU_STATS_P4(rx_jabber_frames),
1275	GBENU_STATS_P4(rx_undersized_frames),
1276	GBENU_STATS_P4(rx_fragments),
1277	GBENU_STATS_P4(ale_drop),
1278	GBENU_STATS_P4(ale_overrun_drop),
1279	GBENU_STATS_P4(rx_bytes),
1280	GBENU_STATS_P4(tx_good_frames),
1281	GBENU_STATS_P4(tx_broadcast_frames),
1282	GBENU_STATS_P4(tx_multicast_frames),
1283	GBENU_STATS_P4(tx_pause_frames),
1284	GBENU_STATS_P4(tx_deferred_frames),
1285	GBENU_STATS_P4(tx_collision_frames),
1286	GBENU_STATS_P4(tx_single_coll_frames),
1287	GBENU_STATS_P4(tx_mult_coll_frames),
1288	GBENU_STATS_P4(tx_excessive_collisions),
1289	GBENU_STATS_P4(tx_late_collisions),
1290	GBENU_STATS_P4(rx_ipg_error),
1291	GBENU_STATS_P4(tx_carrier_sense_errors),
1292	GBENU_STATS_P4(tx_bytes),
1293	GBENU_STATS_P4(tx_64B_frames),
1294	GBENU_STATS_P4(tx_65_to_127B_frames),
1295	GBENU_STATS_P4(tx_128_to_255B_frames),
1296	GBENU_STATS_P4(tx_256_to_511B_frames),
1297	GBENU_STATS_P4(tx_512_to_1023B_frames),
1298	GBENU_STATS_P4(tx_1024B_frames),
1299	GBENU_STATS_P4(net_bytes),
1300	GBENU_STATS_P4(rx_bottom_fifo_drop),
1301	GBENU_STATS_P4(rx_port_mask_drop),
1302	GBENU_STATS_P4(rx_top_fifo_drop),
1303	GBENU_STATS_P4(ale_rate_limit_drop),
1304	GBENU_STATS_P4(ale_vid_ingress_drop),
1305	GBENU_STATS_P4(ale_da_eq_sa_drop),
1306	GBENU_STATS_P4(ale_unknown_ucast),
1307	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1308	GBENU_STATS_P4(ale_unknown_mcast),
1309	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1310	GBENU_STATS_P4(ale_unknown_bcast),
1311	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1312	GBENU_STATS_P4(ale_pol_match),
1313	GBENU_STATS_P4(ale_pol_match_red),
1314	GBENU_STATS_P4(ale_pol_match_yellow),
1315	GBENU_STATS_P4(tx_mem_protect_err),
1316	GBENU_STATS_P4(tx_pri0_drop),
1317	GBENU_STATS_P4(tx_pri1_drop),
1318	GBENU_STATS_P4(tx_pri2_drop),
1319	GBENU_STATS_P4(tx_pri3_drop),
1320	GBENU_STATS_P4(tx_pri4_drop),
1321	GBENU_STATS_P4(tx_pri5_drop),
1322	GBENU_STATS_P4(tx_pri6_drop),
1323	GBENU_STATS_P4(tx_pri7_drop),
1324	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1325	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1326	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1327	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1328	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1329	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1330	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1331	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1332	/* GBENU Module 5 */
1333	GBENU_STATS_P5(rx_good_frames),
1334	GBENU_STATS_P5(rx_broadcast_frames),
1335	GBENU_STATS_P5(rx_multicast_frames),
1336	GBENU_STATS_P5(rx_pause_frames),
1337	GBENU_STATS_P5(rx_crc_errors),
1338	GBENU_STATS_P5(rx_align_code_errors),
1339	GBENU_STATS_P5(rx_oversized_frames),
1340	GBENU_STATS_P5(rx_jabber_frames),
1341	GBENU_STATS_P5(rx_undersized_frames),
1342	GBENU_STATS_P5(rx_fragments),
1343	GBENU_STATS_P5(ale_drop),
1344	GBENU_STATS_P5(ale_overrun_drop),
1345	GBENU_STATS_P5(rx_bytes),
1346	GBENU_STATS_P5(tx_good_frames),
1347	GBENU_STATS_P5(tx_broadcast_frames),
1348	GBENU_STATS_P5(tx_multicast_frames),
1349	GBENU_STATS_P5(tx_pause_frames),
1350	GBENU_STATS_P5(tx_deferred_frames),
1351	GBENU_STATS_P5(tx_collision_frames),
1352	GBENU_STATS_P5(tx_single_coll_frames),
1353	GBENU_STATS_P5(tx_mult_coll_frames),
1354	GBENU_STATS_P5(tx_excessive_collisions),
1355	GBENU_STATS_P5(tx_late_collisions),
1356	GBENU_STATS_P5(rx_ipg_error),
1357	GBENU_STATS_P5(tx_carrier_sense_errors),
1358	GBENU_STATS_P5(tx_bytes),
1359	GBENU_STATS_P5(tx_64B_frames),
1360	GBENU_STATS_P5(tx_65_to_127B_frames),
1361	GBENU_STATS_P5(tx_128_to_255B_frames),
1362	GBENU_STATS_P5(tx_256_to_511B_frames),
1363	GBENU_STATS_P5(tx_512_to_1023B_frames),
1364	GBENU_STATS_P5(tx_1024B_frames),
1365	GBENU_STATS_P5(net_bytes),
1366	GBENU_STATS_P5(rx_bottom_fifo_drop),
1367	GBENU_STATS_P5(rx_port_mask_drop),
1368	GBENU_STATS_P5(rx_top_fifo_drop),
1369	GBENU_STATS_P5(ale_rate_limit_drop),
1370	GBENU_STATS_P5(ale_vid_ingress_drop),
1371	GBENU_STATS_P5(ale_da_eq_sa_drop),
1372	GBENU_STATS_P5(ale_unknown_ucast),
1373	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1374	GBENU_STATS_P5(ale_unknown_mcast),
1375	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1376	GBENU_STATS_P5(ale_unknown_bcast),
1377	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1378	GBENU_STATS_P5(ale_pol_match),
1379	GBENU_STATS_P5(ale_pol_match_red),
1380	GBENU_STATS_P5(ale_pol_match_yellow),
1381	GBENU_STATS_P5(tx_mem_protect_err),
1382	GBENU_STATS_P5(tx_pri0_drop),
1383	GBENU_STATS_P5(tx_pri1_drop),
1384	GBENU_STATS_P5(tx_pri2_drop),
1385	GBENU_STATS_P5(tx_pri3_drop),
1386	GBENU_STATS_P5(tx_pri4_drop),
1387	GBENU_STATS_P5(tx_pri5_drop),
1388	GBENU_STATS_P5(tx_pri6_drop),
1389	GBENU_STATS_P5(tx_pri7_drop),
1390	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1391	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1392	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1393	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1394	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1395	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1396	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1397	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1398	/* GBENU Module 6 */
1399	GBENU_STATS_P6(rx_good_frames),
1400	GBENU_STATS_P6(rx_broadcast_frames),
1401	GBENU_STATS_P6(rx_multicast_frames),
1402	GBENU_STATS_P6(rx_pause_frames),
1403	GBENU_STATS_P6(rx_crc_errors),
1404	GBENU_STATS_P6(rx_align_code_errors),
1405	GBENU_STATS_P6(rx_oversized_frames),
1406	GBENU_STATS_P6(rx_jabber_frames),
1407	GBENU_STATS_P6(rx_undersized_frames),
1408	GBENU_STATS_P6(rx_fragments),
1409	GBENU_STATS_P6(ale_drop),
1410	GBENU_STATS_P6(ale_overrun_drop),
1411	GBENU_STATS_P6(rx_bytes),
1412	GBENU_STATS_P6(tx_good_frames),
1413	GBENU_STATS_P6(tx_broadcast_frames),
1414	GBENU_STATS_P6(tx_multicast_frames),
1415	GBENU_STATS_P6(tx_pause_frames),
1416	GBENU_STATS_P6(tx_deferred_frames),
1417	GBENU_STATS_P6(tx_collision_frames),
1418	GBENU_STATS_P6(tx_single_coll_frames),
1419	GBENU_STATS_P6(tx_mult_coll_frames),
1420	GBENU_STATS_P6(tx_excessive_collisions),
1421	GBENU_STATS_P6(tx_late_collisions),
1422	GBENU_STATS_P6(rx_ipg_error),
1423	GBENU_STATS_P6(tx_carrier_sense_errors),
1424	GBENU_STATS_P6(tx_bytes),
1425	GBENU_STATS_P6(tx_64B_frames),
1426	GBENU_STATS_P6(tx_65_to_127B_frames),
1427	GBENU_STATS_P6(tx_128_to_255B_frames),
1428	GBENU_STATS_P6(tx_256_to_511B_frames),
1429	GBENU_STATS_P6(tx_512_to_1023B_frames),
1430	GBENU_STATS_P6(tx_1024B_frames),
1431	GBENU_STATS_P6(net_bytes),
1432	GBENU_STATS_P6(rx_bottom_fifo_drop),
1433	GBENU_STATS_P6(rx_port_mask_drop),
1434	GBENU_STATS_P6(rx_top_fifo_drop),
1435	GBENU_STATS_P6(ale_rate_limit_drop),
1436	GBENU_STATS_P6(ale_vid_ingress_drop),
1437	GBENU_STATS_P6(ale_da_eq_sa_drop),
1438	GBENU_STATS_P6(ale_unknown_ucast),
1439	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1440	GBENU_STATS_P6(ale_unknown_mcast),
1441	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1442	GBENU_STATS_P6(ale_unknown_bcast),
1443	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1444	GBENU_STATS_P6(ale_pol_match),
1445	GBENU_STATS_P6(ale_pol_match_red),
1446	GBENU_STATS_P6(ale_pol_match_yellow),
1447	GBENU_STATS_P6(tx_mem_protect_err),
1448	GBENU_STATS_P6(tx_pri0_drop),
1449	GBENU_STATS_P6(tx_pri1_drop),
1450	GBENU_STATS_P6(tx_pri2_drop),
1451	GBENU_STATS_P6(tx_pri3_drop),
1452	GBENU_STATS_P6(tx_pri4_drop),
1453	GBENU_STATS_P6(tx_pri5_drop),
1454	GBENU_STATS_P6(tx_pri6_drop),
1455	GBENU_STATS_P6(tx_pri7_drop),
1456	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1457	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1458	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1459	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1460	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1461	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1462	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1463	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1464	/* GBENU Module 7 */
1465	GBENU_STATS_P7(rx_good_frames),
1466	GBENU_STATS_P7(rx_broadcast_frames),
1467	GBENU_STATS_P7(rx_multicast_frames),
1468	GBENU_STATS_P7(rx_pause_frames),
1469	GBENU_STATS_P7(rx_crc_errors),
1470	GBENU_STATS_P7(rx_align_code_errors),
1471	GBENU_STATS_P7(rx_oversized_frames),
1472	GBENU_STATS_P7(rx_jabber_frames),
1473	GBENU_STATS_P7(rx_undersized_frames),
1474	GBENU_STATS_P7(rx_fragments),
1475	GBENU_STATS_P7(ale_drop),
1476	GBENU_STATS_P7(ale_overrun_drop),
1477	GBENU_STATS_P7(rx_bytes),
1478	GBENU_STATS_P7(tx_good_frames),
1479	GBENU_STATS_P7(tx_broadcast_frames),
1480	GBENU_STATS_P7(tx_multicast_frames),
1481	GBENU_STATS_P7(tx_pause_frames),
1482	GBENU_STATS_P7(tx_deferred_frames),
1483	GBENU_STATS_P7(tx_collision_frames),
1484	GBENU_STATS_P7(tx_single_coll_frames),
1485	GBENU_STATS_P7(tx_mult_coll_frames),
1486	GBENU_STATS_P7(tx_excessive_collisions),
1487	GBENU_STATS_P7(tx_late_collisions),
1488	GBENU_STATS_P7(rx_ipg_error),
1489	GBENU_STATS_P7(tx_carrier_sense_errors),
1490	GBENU_STATS_P7(tx_bytes),
1491	GBENU_STATS_P7(tx_64B_frames),
1492	GBENU_STATS_P7(tx_65_to_127B_frames),
1493	GBENU_STATS_P7(tx_128_to_255B_frames),
1494	GBENU_STATS_P7(tx_256_to_511B_frames),
1495	GBENU_STATS_P7(tx_512_to_1023B_frames),
1496	GBENU_STATS_P7(tx_1024B_frames),
1497	GBENU_STATS_P7(net_bytes),
1498	GBENU_STATS_P7(rx_bottom_fifo_drop),
1499	GBENU_STATS_P7(rx_port_mask_drop),
1500	GBENU_STATS_P7(rx_top_fifo_drop),
1501	GBENU_STATS_P7(ale_rate_limit_drop),
1502	GBENU_STATS_P7(ale_vid_ingress_drop),
1503	GBENU_STATS_P7(ale_da_eq_sa_drop),
1504	GBENU_STATS_P7(ale_unknown_ucast),
1505	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1506	GBENU_STATS_P7(ale_unknown_mcast),
1507	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1508	GBENU_STATS_P7(ale_unknown_bcast),
1509	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1510	GBENU_STATS_P7(ale_pol_match),
1511	GBENU_STATS_P7(ale_pol_match_red),
1512	GBENU_STATS_P7(ale_pol_match_yellow),
1513	GBENU_STATS_P7(tx_mem_protect_err),
1514	GBENU_STATS_P7(tx_pri0_drop),
1515	GBENU_STATS_P7(tx_pri1_drop),
1516	GBENU_STATS_P7(tx_pri2_drop),
1517	GBENU_STATS_P7(tx_pri3_drop),
1518	GBENU_STATS_P7(tx_pri4_drop),
1519	GBENU_STATS_P7(tx_pri5_drop),
1520	GBENU_STATS_P7(tx_pri6_drop),
1521	GBENU_STATS_P7(tx_pri7_drop),
1522	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1523	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1524	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1525	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1526	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1527	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1528	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1529	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1530	/* GBENU Module 8 */
1531	GBENU_STATS_P8(rx_good_frames),
1532	GBENU_STATS_P8(rx_broadcast_frames),
1533	GBENU_STATS_P8(rx_multicast_frames),
1534	GBENU_STATS_P8(rx_pause_frames),
1535	GBENU_STATS_P8(rx_crc_errors),
1536	GBENU_STATS_P8(rx_align_code_errors),
1537	GBENU_STATS_P8(rx_oversized_frames),
1538	GBENU_STATS_P8(rx_jabber_frames),
1539	GBENU_STATS_P8(rx_undersized_frames),
1540	GBENU_STATS_P8(rx_fragments),
1541	GBENU_STATS_P8(ale_drop),
1542	GBENU_STATS_P8(ale_overrun_drop),
1543	GBENU_STATS_P8(rx_bytes),
1544	GBENU_STATS_P8(tx_good_frames),
1545	GBENU_STATS_P8(tx_broadcast_frames),
1546	GBENU_STATS_P8(tx_multicast_frames),
1547	GBENU_STATS_P8(tx_pause_frames),
1548	GBENU_STATS_P8(tx_deferred_frames),
1549	GBENU_STATS_P8(tx_collision_frames),
1550	GBENU_STATS_P8(tx_single_coll_frames),
1551	GBENU_STATS_P8(tx_mult_coll_frames),
1552	GBENU_STATS_P8(tx_excessive_collisions),
1553	GBENU_STATS_P8(tx_late_collisions),
1554	GBENU_STATS_P8(rx_ipg_error),
1555	GBENU_STATS_P8(tx_carrier_sense_errors),
1556	GBENU_STATS_P8(tx_bytes),
1557	GBENU_STATS_P8(tx_64B_frames),
1558	GBENU_STATS_P8(tx_65_to_127B_frames),
1559	GBENU_STATS_P8(tx_128_to_255B_frames),
1560	GBENU_STATS_P8(tx_256_to_511B_frames),
1561	GBENU_STATS_P8(tx_512_to_1023B_frames),
1562	GBENU_STATS_P8(tx_1024B_frames),
1563	GBENU_STATS_P8(net_bytes),
1564	GBENU_STATS_P8(rx_bottom_fifo_drop),
1565	GBENU_STATS_P8(rx_port_mask_drop),
1566	GBENU_STATS_P8(rx_top_fifo_drop),
1567	GBENU_STATS_P8(ale_rate_limit_drop),
1568	GBENU_STATS_P8(ale_vid_ingress_drop),
1569	GBENU_STATS_P8(ale_da_eq_sa_drop),
1570	GBENU_STATS_P8(ale_unknown_ucast),
1571	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1572	GBENU_STATS_P8(ale_unknown_mcast),
1573	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1574	GBENU_STATS_P8(ale_unknown_bcast),
1575	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1576	GBENU_STATS_P8(ale_pol_match),
1577	GBENU_STATS_P8(ale_pol_match_red),
1578	GBENU_STATS_P8(ale_pol_match_yellow),
1579	GBENU_STATS_P8(tx_mem_protect_err),
1580	GBENU_STATS_P8(tx_pri0_drop),
1581	GBENU_STATS_P8(tx_pri1_drop),
1582	GBENU_STATS_P8(tx_pri2_drop),
1583	GBENU_STATS_P8(tx_pri3_drop),
1584	GBENU_STATS_P8(tx_pri4_drop),
1585	GBENU_STATS_P8(tx_pri5_drop),
1586	GBENU_STATS_P8(tx_pri6_drop),
1587	GBENU_STATS_P8(tx_pri7_drop),
1588	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1589	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1590	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1591	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1592	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1593	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1594	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1595	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1596};
1597
1598#define XGBE_STATS0_INFO(field)				\
1599{							\
1600	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1601	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1602	offsetof(struct xgbe_hw_stats, field)		\
1603}
1604
1605#define XGBE_STATS1_INFO(field)				\
1606{							\
1607	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1608	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1609	offsetof(struct xgbe_hw_stats, field)		\
1610}
1611
1612#define XGBE_STATS2_INFO(field)				\
1613{							\
1614	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1615	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1616	offsetof(struct xgbe_hw_stats, field)		\
1617}
1618
1619static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1620	/* GBE module 0 */
1621	XGBE_STATS0_INFO(rx_good_frames),
1622	XGBE_STATS0_INFO(rx_broadcast_frames),
1623	XGBE_STATS0_INFO(rx_multicast_frames),
1624	XGBE_STATS0_INFO(rx_oversized_frames),
1625	XGBE_STATS0_INFO(rx_undersized_frames),
1626	XGBE_STATS0_INFO(overrun_type4),
1627	XGBE_STATS0_INFO(overrun_type5),
1628	XGBE_STATS0_INFO(rx_bytes),
1629	XGBE_STATS0_INFO(tx_good_frames),
1630	XGBE_STATS0_INFO(tx_broadcast_frames),
1631	XGBE_STATS0_INFO(tx_multicast_frames),
1632	XGBE_STATS0_INFO(tx_bytes),
1633	XGBE_STATS0_INFO(tx_64byte_frames),
1634	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1635	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1636	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1637	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1638	XGBE_STATS0_INFO(tx_1024byte_frames),
1639	XGBE_STATS0_INFO(net_bytes),
1640	XGBE_STATS0_INFO(rx_sof_overruns),
1641	XGBE_STATS0_INFO(rx_mof_overruns),
1642	XGBE_STATS0_INFO(rx_dma_overruns),
1643	/* XGBE module 1 */
1644	XGBE_STATS1_INFO(rx_good_frames),
1645	XGBE_STATS1_INFO(rx_broadcast_frames),
1646	XGBE_STATS1_INFO(rx_multicast_frames),
1647	XGBE_STATS1_INFO(rx_pause_frames),
1648	XGBE_STATS1_INFO(rx_crc_errors),
1649	XGBE_STATS1_INFO(rx_align_code_errors),
1650	XGBE_STATS1_INFO(rx_oversized_frames),
1651	XGBE_STATS1_INFO(rx_jabber_frames),
1652	XGBE_STATS1_INFO(rx_undersized_frames),
1653	XGBE_STATS1_INFO(rx_fragments),
1654	XGBE_STATS1_INFO(overrun_type4),
1655	XGBE_STATS1_INFO(overrun_type5),
1656	XGBE_STATS1_INFO(rx_bytes),
1657	XGBE_STATS1_INFO(tx_good_frames),
1658	XGBE_STATS1_INFO(tx_broadcast_frames),
1659	XGBE_STATS1_INFO(tx_multicast_frames),
1660	XGBE_STATS1_INFO(tx_pause_frames),
1661	XGBE_STATS1_INFO(tx_deferred_frames),
1662	XGBE_STATS1_INFO(tx_collision_frames),
1663	XGBE_STATS1_INFO(tx_single_coll_frames),
1664	XGBE_STATS1_INFO(tx_mult_coll_frames),
1665	XGBE_STATS1_INFO(tx_excessive_collisions),
1666	XGBE_STATS1_INFO(tx_late_collisions),
1667	XGBE_STATS1_INFO(tx_underrun),
1668	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1669	XGBE_STATS1_INFO(tx_bytes),
1670	XGBE_STATS1_INFO(tx_64byte_frames),
1671	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1672	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1673	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1674	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1675	XGBE_STATS1_INFO(tx_1024byte_frames),
1676	XGBE_STATS1_INFO(net_bytes),
1677	XGBE_STATS1_INFO(rx_sof_overruns),
1678	XGBE_STATS1_INFO(rx_mof_overruns),
1679	XGBE_STATS1_INFO(rx_dma_overruns),
1680	/* XGBE module 2 */
1681	XGBE_STATS2_INFO(rx_good_frames),
1682	XGBE_STATS2_INFO(rx_broadcast_frames),
1683	XGBE_STATS2_INFO(rx_multicast_frames),
1684	XGBE_STATS2_INFO(rx_pause_frames),
1685	XGBE_STATS2_INFO(rx_crc_errors),
1686	XGBE_STATS2_INFO(rx_align_code_errors),
1687	XGBE_STATS2_INFO(rx_oversized_frames),
1688	XGBE_STATS2_INFO(rx_jabber_frames),
1689	XGBE_STATS2_INFO(rx_undersized_frames),
1690	XGBE_STATS2_INFO(rx_fragments),
1691	XGBE_STATS2_INFO(overrun_type4),
1692	XGBE_STATS2_INFO(overrun_type5),
1693	XGBE_STATS2_INFO(rx_bytes),
1694	XGBE_STATS2_INFO(tx_good_frames),
1695	XGBE_STATS2_INFO(tx_broadcast_frames),
1696	XGBE_STATS2_INFO(tx_multicast_frames),
1697	XGBE_STATS2_INFO(tx_pause_frames),
1698	XGBE_STATS2_INFO(tx_deferred_frames),
1699	XGBE_STATS2_INFO(tx_collision_frames),
1700	XGBE_STATS2_INFO(tx_single_coll_frames),
1701	XGBE_STATS2_INFO(tx_mult_coll_frames),
1702	XGBE_STATS2_INFO(tx_excessive_collisions),
1703	XGBE_STATS2_INFO(tx_late_collisions),
1704	XGBE_STATS2_INFO(tx_underrun),
1705	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1706	XGBE_STATS2_INFO(tx_bytes),
1707	XGBE_STATS2_INFO(tx_64byte_frames),
1708	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1709	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1710	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1711	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1712	XGBE_STATS2_INFO(tx_1024byte_frames),
1713	XGBE_STATS2_INFO(net_bytes),
1714	XGBE_STATS2_INFO(rx_sof_overruns),
1715	XGBE_STATS2_INFO(rx_mof_overruns),
1716	XGBE_STATS2_INFO(rx_dma_overruns),
1717};
1718
1719#define for_each_intf(i, priv) \
1720	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1721
1722#define for_each_sec_slave(slave, priv) \
1723	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1724
1725#define first_sec_slave(priv)					\
1726	list_first_entry(&priv->secondary_slaves, \
1727			struct gbe_slave, slave_list)
1728
1729static void keystone_get_drvinfo(struct net_device *ndev,
1730				 struct ethtool_drvinfo *info)
1731{
1732	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1733	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1734}
1735
1736static u32 keystone_get_msglevel(struct net_device *ndev)
1737{
1738	struct netcp_intf *netcp = netdev_priv(ndev);
1739
1740	return netcp->msg_enable;
1741}
1742
1743static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1744{
1745	struct netcp_intf *netcp = netdev_priv(ndev);
1746
1747	netcp->msg_enable = value;
1748}
1749
1750static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1751{
1752	struct gbe_intf *gbe_intf;
1753
1754	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1755	if (!gbe_intf)
1756		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1757
1758	return gbe_intf;
1759}
1760
1761static void keystone_get_stat_strings(struct net_device *ndev,
1762				      uint32_t stringset, uint8_t *data)
1763{
1764	struct netcp_intf *netcp = netdev_priv(ndev);
1765	struct gbe_intf *gbe_intf;
1766	struct gbe_priv *gbe_dev;
1767	int i;
1768
1769	gbe_intf = keystone_get_intf_data(netcp);
1770	if (!gbe_intf)
1771		return;
1772	gbe_dev = gbe_intf->gbe_dev;
1773
1774	switch (stringset) {
1775	case ETH_SS_STATS:
1776		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1777			memcpy(data, gbe_dev->et_stats[i].desc,
1778			       ETH_GSTRING_LEN);
1779			data += ETH_GSTRING_LEN;
1780		}
1781		break;
1782	case ETH_SS_TEST:
1783		break;
1784	}
1785}
1786
1787static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1788{
1789	struct netcp_intf *netcp = netdev_priv(ndev);
1790	struct gbe_intf *gbe_intf;
1791	struct gbe_priv *gbe_dev;
1792
1793	gbe_intf = keystone_get_intf_data(netcp);
1794	if (!gbe_intf)
1795		return -EINVAL;
1796	gbe_dev = gbe_intf->gbe_dev;
1797
1798	switch (stringset) {
1799	case ETH_SS_TEST:
1800		return 0;
1801	case ETH_SS_STATS:
1802		return gbe_dev->num_et_stats;
1803	default:
1804		return -EINVAL;
1805	}
1806}
1807
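/* Clear the accumulated software counters for every ethtool stat that
 * belongs to the given stats module and latch the current hardware
 * counter values as the new baseline for delta accounting.
 */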
1808static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1809{
1810	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1811	u32  __iomem *p_stats_entry;
1812	int i;
1813
1814	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1815		if (gbe_dev->et_stats[i].type == stats_mod) {
1816			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1817			gbe_dev->hw_stats[i] = 0;
1818			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1819		}
1820	}
1821}
1822
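/* Fold the change in one 32-bit hardware counter since the last read
 * into its 64-bit software accumulator; the unsigned subtraction also
 * handles counter wrap-around.
 */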
1823static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1824					     int et_stats_entry)
1825{
1826	void __iomem *base = NULL;
1827	u32  __iomem *p_stats_entry;
1828	u32 curr, delta;
1829
1830	/* The hw_stats_regs pointers are already
1831	 * properly set to point to the right base:
1832	 */
1833	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1834	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1835	curr = readl(p_stats_entry);
1836	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1837	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1838	gbe_dev->hw_stats[et_stats_entry] += delta;
1839}
1840
1841static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1842{
1843	int i;
1844
1845	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1846		gbe_update_hw_stats_entry(gbe_dev, i);
1847
1848		if (data)
1849			data[i] = gbe_dev->hw_stats[i];
1850	}
1851}
1852
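/* On version 1.4 hardware only two of the four stats modules are
 * visible at a time; select either the A/B or the C/D pair through
 * the CD_SEL bit of the stat_port_en register.
 */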
1853static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1854					       int stats_mod)
1855{
1856	u32 val;
1857
1858	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1859
1860	switch (stats_mod) {
1861	case GBE_STATSA_MODULE:
1862	case GBE_STATSB_MODULE:
1863		val &= ~GBE_STATS_CD_SEL;
1864		break;
1865	case GBE_STATSC_MODULE:
1866	case GBE_STATSD_MODULE:
1867		val |= GBE_STATS_CD_SEL;
1868		break;
1869	default:
1870		return;
1871	}
1872
1873	/* make the stat module visible */
1874	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1875}
1876
1877static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1878{
1879	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1880	gbe_reset_mod_stats(gbe_dev, stats_mod);
1881}
1882
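/* Version 1.4 stats update: the first half of the ethtool stats table
 * belongs to modules A/B and the second half to C/D, so switch each
 * pair into view before reading its counters.
 */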
1883static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1884{
1885	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1886	int et_entry, j, pair;
1887
1888	for (pair = 0; pair < 2; pair++) {
1889		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1890						      GBE_STATSC_MODULE :
1891						      GBE_STATSA_MODULE));
1892
1893		for (j = 0; j < half_num_et_stats; j++) {
1894			et_entry = pair * half_num_et_stats + j;
1895			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1896
1897			if (data)
1898				data[et_entry] = gbe_dev->hw_stats[et_entry];
1899		}
1900	}
1901}
1902
1903static void keystone_get_ethtool_stats(struct net_device *ndev,
1904				       struct ethtool_stats *stats,
1905				       uint64_t *data)
1906{
1907	struct netcp_intf *netcp = netdev_priv(ndev);
1908	struct gbe_intf *gbe_intf;
1909	struct gbe_priv *gbe_dev;
1910
1911	gbe_intf = keystone_get_intf_data(netcp);
1912	if (!gbe_intf)
1913		return;
1914
1915	gbe_dev = gbe_intf->gbe_dev;
1916	spin_lock_bh(&gbe_dev->hw_stats_lock);
1917	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1918		gbe_update_stats_ver14(gbe_dev, data);
1919	else
1920		gbe_update_stats(gbe_dev, data);
1921	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1922}
1923
1924static int keystone_get_link_ksettings(struct net_device *ndev,
1925				       struct ethtool_link_ksettings *cmd)
1926{
1927	struct netcp_intf *netcp = netdev_priv(ndev);
1928	struct phy_device *phy = ndev->phydev;
1929	struct gbe_intf *gbe_intf;
1930	int ret;
1931
1932	if (!phy)
1933		return -EINVAL;
1934
1935	gbe_intf = keystone_get_intf_data(netcp);
1936	if (!gbe_intf)
1937		return -EINVAL;
1938
1939	if (!gbe_intf->slave)
1940		return -EINVAL;
1941
1942	ret = phy_ethtool_ksettings_get(phy, cmd);
1943	if (!ret)
1944		cmd->base.port = gbe_intf->slave->phy_port_t;
1945
1946	return ret;
1947}
1948
1949static int keystone_set_link_ksettings(struct net_device *ndev,
1950				       const struct ethtool_link_ksettings *cmd)
1951{
1952	struct netcp_intf *netcp = netdev_priv(ndev);
1953	struct phy_device *phy = ndev->phydev;
1954	struct gbe_intf *gbe_intf;
1955	u8 port = cmd->base.port;
1956	u32 advertising, supported;
1957	u32 features;
1958
1959	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1960						cmd->link_modes.advertising);
1961	ethtool_convert_link_mode_to_legacy_u32(&supported,
1962						cmd->link_modes.supported);
1963	features = advertising & supported;
1964
1965	if (!phy)
1966		return -EINVAL;
1967
1968	gbe_intf = keystone_get_intf_data(netcp);
1969	if (!gbe_intf)
1970		return -EINVAL;
1971
1972	if (!gbe_intf->slave)
1973		return -EINVAL;
1974
1975	if (port != gbe_intf->slave->phy_port_t) {
1976		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1977			return -EINVAL;
1978
1979		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1980			return -EINVAL;
1981
1982		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1983			return -EINVAL;
1984
1985		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1986			return -EINVAL;
1987
1988		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1989			return -EINVAL;
1990	}
1991
1992	gbe_intf->slave->phy_port_t = port;
1993	return phy_ethtool_ksettings_set(phy, cmd);
1994}
1995
1996#if IS_ENABLED(CONFIG_TI_CPTS)
1997static int keystone_get_ts_info(struct net_device *ndev,
1998				struct ethtool_ts_info *info)
1999{
2000	struct netcp_intf *netcp = netdev_priv(ndev);
2001	struct gbe_intf *gbe_intf;
2002
2003	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2004	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2005		return -EINVAL;
2006
2007	info->so_timestamping =
2008		SOF_TIMESTAMPING_TX_HARDWARE |
2009		SOF_TIMESTAMPING_TX_SOFTWARE |
2010		SOF_TIMESTAMPING_RX_HARDWARE |
2011		SOF_TIMESTAMPING_RX_SOFTWARE |
2012		SOF_TIMESTAMPING_SOFTWARE |
2013		SOF_TIMESTAMPING_RAW_HARDWARE;
2014	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2015	info->tx_types =
2016		(1 << HWTSTAMP_TX_OFF) |
2017		(1 << HWTSTAMP_TX_ON);
2018	info->rx_filters =
2019		(1 << HWTSTAMP_FILTER_NONE) |
2020		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2021		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2022	return 0;
2023}
2024#else
2025static int keystone_get_ts_info(struct net_device *ndev,
2026				struct ethtool_ts_info *info)
2027{
2028	info->so_timestamping =
2029		SOF_TIMESTAMPING_TX_SOFTWARE |
2030		SOF_TIMESTAMPING_RX_SOFTWARE |
2031		SOF_TIMESTAMPING_SOFTWARE;
2032	info->phc_index = -1;
2033	info->tx_types = 0;
2034	info->rx_filters = 0;
2035	return 0;
2036}
2037#endif /* CONFIG_TI_CPTS */
2038
2039static const struct ethtool_ops keystone_ethtool_ops = {
2040	.get_drvinfo		= keystone_get_drvinfo,
2041	.get_link		= ethtool_op_get_link,
2042	.get_msglevel		= keystone_get_msglevel,
2043	.set_msglevel		= keystone_set_msglevel,
2044	.get_strings		= keystone_get_stat_strings,
2045	.get_sset_count		= keystone_get_sset_count,
2046	.get_ethtool_stats	= keystone_get_ethtool_stats,
2047	.get_link_ksettings	= keystone_get_link_ksettings,
2048	.set_link_ksettings	= keystone_set_link_ksettings,
2049	.get_ts_info		= keystone_get_ts_info,
2050};
2051
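/* Split a 6-byte MAC address into the two words programmed into the
 * per-port sa_hi (bytes 0-3) and sa_lo (bytes 4-5) registers.
 */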
2052#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
2053			 ((mac)[2] << 16) | ((mac)[3] << 24))
2054#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
2055
2056static void gbe_set_slave_mac(struct gbe_slave *slave,
2057			      struct gbe_intf *gbe_intf)
2058{
2059	struct net_device *ndev = gbe_intf->ndev;
2060
2061	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2062	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2063}
2064
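/* Map a slave index to its switch port number; when the host occupies
 * port 0, the slave ports are numbered from 1.
 */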
2065static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2066{
2067	if (priv->host_port == 0)
2068		return slave_num + 1;
2069
2070	return slave_num;
2071}
2072
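/* Apply a link up/down transition to a slave port: program the MAC
 * sliver for the negotiated speed, move the ALE port state between
 * FORWARD and DISABLE, and update the carrier directly for link
 * interfaces that are not MAC-to-PHY (the PHY layer handles it there).
 */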
2073static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2074					  struct net_device *ndev,
2075					  struct gbe_slave *slave,
2076					  int up)
2077{
2078	struct phy_device *phy = slave->phy;
2079	u32 mac_control = 0;
2080
2081	if (up) {
2082		mac_control = slave->mac_control;
2083		if (phy && (phy->speed == SPEED_1000)) {
2084			mac_control |= MACSL_GIG_MODE;
2085			mac_control &= ~MACSL_XGIG_MODE;
2086		} else if (phy && (phy->speed == SPEED_10000)) {
2087			mac_control |= MACSL_XGIG_MODE;
2088			mac_control &= ~MACSL_GIG_MODE;
2089		}
2090
2091		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2092						 mac_control));
2093
2094		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2095				     ALE_PORT_STATE,
2096				     ALE_PORT_STATE_FORWARD);
2097
2098		if (ndev && slave->open &&
2099		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2100		    slave->link_interface != XGMII_LINK_MAC_PHY)
2101			netif_carrier_on(ndev);
2102	} else {
2103		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2104						 mac_control));
2105		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2106				     ALE_PORT_STATE,
2107				     ALE_PORT_STATE_DISABLE);
2108		if (ndev &&
2109		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2110		    slave->link_interface != XGMII_LINK_MAC_PHY)
2111			netif_carrier_off(ndev);
2112	}
2113
2114	if (phy)
2115		phy_print_status(phy);
2116}
2117
2118static bool gbe_phy_link_status(struct gbe_slave *slave)
2119{
2120	return !slave->phy || slave->phy->link;
2121}
2122
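/* Combine the PHY link status with the SGMII port status (for
 * non-XGMII links) and, if the result differs from the cached state,
 * run the link state action for the slave.
 */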
2123static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2124					  struct gbe_slave *slave,
2125					  struct net_device *ndev)
2126{
2127	int sp = slave->slave_num;
2128	int phy_link_state, sgmii_link_state = 1, link_state;
2129
2130	if (!slave->open)
2131		return;
2132
2133	if (!SLAVE_LINK_IS_XGMII(slave)) {
2134		sgmii_link_state =
2135			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2136	}
2137
2138	phy_link_state = gbe_phy_link_status(slave);
2139	link_state = phy_link_state & sgmii_link_state;
2140
2141	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2142		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2143					      link_state);
2144}
2145
2146static void xgbe_adjust_link(struct net_device *ndev)
2147{
2148	struct netcp_intf *netcp = netdev_priv(ndev);
2149	struct gbe_intf *gbe_intf;
2150
2151	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2152	if (!gbe_intf)
2153		return;
2154
2155	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2156				      ndev);
2157}
2158
2159static void gbe_adjust_link(struct net_device *ndev)
2160{
2161	struct netcp_intf *netcp = netdev_priv(ndev);
2162	struct gbe_intf *gbe_intf;
2163
2164	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2165	if (!gbe_intf)
2166		return;
2167
2168	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2169				      ndev);
2170}
2171
2172static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2173{
2174	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2175	struct gbe_slave *slave;
2176
2177	for_each_sec_slave(slave, gbe_dev)
2178		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2179}
2180
2181/* Reset EMAC
2182 * Soft reset is set and polled until clear, or until a timeout occurs
2183 */
2184static int gbe_port_reset(struct gbe_slave *slave)
2185{
2186	u32 i, v;
2187
2188	/* Set the soft reset bit */
2189	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2190
2191	/* Wait for the bit to clear */
2192	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2193		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2194		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2195			return 0;
2196	}
2197
2198	/* Timeout on the reset */
2199	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2200}
2201
2202/* Configure EMAC */
2203static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2204			    int max_rx_len)
2205{
2206	void __iomem *rx_maxlen_reg;
2207	u32 xgmii_mode;
2208
2209	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2210		max_rx_len = NETCP_MAX_FRAME_SIZE;
2211
2212	/* Enable correct MII mode at SS level */
2213	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
2214	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2215		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2216		xgmii_mode |= (1 << slave->slave_num);
2217		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2218	}
2219
2220	if (IS_SS_ID_MU(gbe_dev))
2221		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2222	else
2223		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2224
2225	writel(max_rx_len, rx_maxlen_reg);
2226	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2227}
2228
2229static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2230			      struct gbe_slave *slave, bool set)
2231{
2232	if (SLAVE_LINK_IS_XGMII(slave))
2233		return;
2234
2235	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2236			    slave->slave_num, set);
2237}
2238
2239static void gbe_slave_stop(struct gbe_intf *intf)
2240{
2241	struct gbe_priv *gbe_dev = intf->gbe_dev;
2242	struct gbe_slave *slave = intf->slave;
2243
2244	gbe_sgmii_rtreset(gbe_dev, slave, true);
2245	gbe_port_reset(slave);
2246	/* Disable forwarding */
2247	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2248			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2249	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2250			   1 << slave->port_num, 0, 0);
2251
2252	if (!slave->phy)
2253		return;
2254
2255	phy_stop(slave->phy);
2256	phy_disconnect(slave->phy);
2257	slave->phy = NULL;
2258}
2259
2260static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2261{
2262	if (SLAVE_LINK_IS_XGMII(slave))
2263		return;
2264
2265	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2266	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2267			   slave->link_interface);
2268}
2269
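/* Bring up one slave port: configure SGMII, reset and configure the
 * MAC sliver, program the port MAC address, enable ALE forwarding and,
 * for MAC-to-PHY link interfaces, connect and start the PHY.
 */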
2270static int gbe_slave_open(struct gbe_intf *gbe_intf)
2271{
2272	struct gbe_priv *priv = gbe_intf->gbe_dev;
2273	struct gbe_slave *slave = gbe_intf->slave;
2274	phy_interface_t phy_mode;
2275	bool has_phy = false;
2276
2277	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2278
2279	gbe_sgmii_config(priv, slave);
2280	gbe_port_reset(slave);
2281	gbe_sgmii_rtreset(priv, slave, false);
2282	gbe_port_config(priv, slave, priv->rx_packet_max);
2283	gbe_set_slave_mac(slave, gbe_intf);
2284	/* enable forwarding */
2285	cpsw_ale_control_set(priv->ale, slave->port_num,
2286			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2287	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2288			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2289
2290	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2291		has_phy = true;
2292		phy_mode = PHY_INTERFACE_MODE_SGMII;
2293		slave->phy_port_t = PORT_MII;
2294	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2295		has_phy = true;
2296		phy_mode = PHY_INTERFACE_MODE_NA;
2297		slave->phy_port_t = PORT_FIBRE;
2298	}
2299
2300	if (has_phy) {
2301		if (priv->ss_version == XGBE_SS_VERSION_10)
2302			hndlr = xgbe_adjust_link;
2303
2304		slave->phy = of_phy_connect(gbe_intf->ndev,
2305					    slave->phy_node,
2306					    hndlr, 0,
2307					    phy_mode);
2308		if (!slave->phy) {
2309			dev_err(priv->dev, "phy not found on slave %d\n",
2310				slave->slave_num);
2311			return -ENODEV;
2312		}
2313		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2314			phydev_name(slave->phy));
2315		phy_start(slave->phy);
2316		phy_read_status(slave->phy);
2317	}
2318	return 0;
2319}
2320
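/* Initialize the host (CPU-facing) port: set the Tx priority map and
 * RX max length, start the ALE and program the switch-wide ALE
 * controls (bypass, unknown VLAN/mcast flood masks, untagged egress).
 */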
2321static void gbe_init_host_port(struct gbe_priv *priv)
2322{
2323	int bypass_en = 1;
2324
2325	/* Host Tx Pri */
2326	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2327		writel(HOST_TX_PRI_MAP_DEFAULT,
2328		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2329
2330	/* Max length register */
2331	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2332						  rx_maxlen));
2333
2334	cpsw_ale_start(priv->ale);
2335
2336	if (priv->enable_ale)
2337		bypass_en = 0;
2338
2339	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2340
2341	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2342
2343	cpsw_ale_control_set(priv->ale, priv->host_port,
2344			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2345
2346	cpsw_ale_control_set(priv->ale, 0,
2347			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2348			     GBE_PORT_MASK(priv->ale_ports));
2349
2350	cpsw_ale_control_set(priv->ale, 0,
2351			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2352			     GBE_PORT_MASK(priv->ale_ports - 1));
2353
2354	cpsw_ale_control_set(priv->ale, 0,
2355			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2356			     GBE_PORT_MASK(priv->ale_ports));
2357
2358	cpsw_ale_control_set(priv->ale, 0,
2359			     ALE_PORT_UNTAGGED_EGRESS,
2360			     GBE_PORT_MASK(priv->ale_ports));
2361}
2362
2363static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2364{
2365	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2366	u16 vlan_id;
2367
2368	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2369			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2370			   ALE_MCAST_FWD_2);
2371	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2372		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2373				   GBE_PORT_MASK(gbe_dev->ale_ports),
2374				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2375	}
2376}
2377
2378static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2379{
2380	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2381	u16 vlan_id;
2382
2383	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2384
2385	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2386		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2387				   ALE_VLAN, vlan_id);
2388}
2389
2390static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2391{
2392	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2393	u16 vlan_id;
2394
2395	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2396
2397	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2398		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2399	}
2400}
2401
2402static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2403{
2404	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2405	u16 vlan_id;
2406
2407	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2408
2409	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2410		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2411				   ALE_VLAN, vlan_id);
2412	}
2413}
2414
2415static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2416{
2417	struct gbe_intf *gbe_intf = intf_priv;
2418	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2419
2420	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2421		naddr->addr, naddr->type);
2422
2423	switch (naddr->type) {
2424	case ADDR_MCAST:
2425	case ADDR_BCAST:
2426		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2427		break;
2428	case ADDR_UCAST:
2429	case ADDR_DEV:
2430		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2431		break;
2432	case ADDR_ANY:
2433		/* nothing to do for promiscuous */
2434	default:
2435		break;
2436	}
2437
2438	return 0;
2439}
2440
2441static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2442{
2443	struct gbe_intf *gbe_intf = intf_priv;
2444	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2445
2446	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2447		naddr->addr, naddr->type);
2448
2449	switch (naddr->type) {
2450	case ADDR_MCAST:
2451	case ADDR_BCAST:
2452		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2453		break;
2454	case ADDR_UCAST:
2455	case ADDR_DEV:
2456		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2457		break;
2458	case ADDR_ANY:
2459		/* nothing to do for promiscuous */
2460	default:
2461		break;
2462	}
2463
2464	return 0;
2465}
2466
2467static int gbe_add_vid(void *intf_priv, int vid)
2468{
2469	struct gbe_intf *gbe_intf = intf_priv;
2470	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2471
2472	set_bit(vid, gbe_intf->active_vlans);
2473
2474	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2475			  GBE_PORT_MASK(gbe_dev->ale_ports),
2476			  GBE_MASK_NO_PORTS,
2477			  GBE_PORT_MASK(gbe_dev->ale_ports),
2478			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2479
2480	return 0;
2481}
2482
2483static int gbe_del_vid(void *intf_priv, int vid)
2484{
2485	struct gbe_intf *gbe_intf = intf_priv;
2486	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2487
2488	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2489	clear_bit(vid, gbe_intf->active_vlans);
2490	return 0;
2491}
2492
2493#if IS_ENABLED(CONFIG_TI_CPTS)
2494#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2495#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2496
2497static void gbe_txtstamp(void *context, struct sk_buff *skb)
2498{
2499	struct gbe_intf *gbe_intf = context;
2500	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2501
2502	cpts_tx_timestamp(gbe_dev->cpts, skb);
2503}
2504
2505static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2506			      const struct netcp_packet *p_info)
2507{
2508	struct sk_buff *skb = p_info->skb;
2509	unsigned int class = ptp_classify_raw(skb);
2510
2511	if (class == PTP_CLASS_NONE)
2512		return false;
2513
2514	switch (class) {
2515	case PTP_CLASS_V1_IPV4:
2516	case PTP_CLASS_V1_IPV6:
2517	case PTP_CLASS_V2_IPV4:
2518	case PTP_CLASS_V2_IPV6:
2519	case PTP_CLASS_V2_L2:
2520	case (PTP_CLASS_V2_VLAN | PTP_CLASS_L2):
2521	case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV4):
2522	case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV6):
2523		return true;
2524	}
2525
2526	return false;
2527}
2528
2529static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2530				 struct netcp_packet *p_info)
2531{
2532	struct phy_device *phydev = p_info->skb->dev->phydev;
2533	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2534
2535	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2536	    !cpts_is_tx_enabled(gbe_dev->cpts))
2537		return 0;
2538
2539	/* If phy has the txtstamp api, assume it will do it.
2540	 * We mark it here because skb_tx_timestamp() is called
2541	 * after all the txhooks are called.
2542	 */
2543	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2544		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2545		return 0;
2546	}
2547
2548	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2549		p_info->txtstamp = gbe_txtstamp;
2550		p_info->ts_context = (void *)gbe_intf;
2551		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2552	}
2553
2554	return 0;
2555}
2556
2557static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2558{
2559	struct phy_device *phydev = p_info->skb->dev->phydev;
2560	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2561
2562	if (p_info->rxtstamp_complete)
2563		return 0;
2564
2565	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2566		p_info->rxtstamp_complete = true;
2567		return 0;
2568	}
2569
2570	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2571	p_info->rxtstamp_complete = true;
2572
2573	return 0;
2574}
2575
2576static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2577{
2578	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2579	struct cpts *cpts = gbe_dev->cpts;
2580	struct hwtstamp_config cfg;
2581
2582	if (!cpts)
2583		return -EOPNOTSUPP;
2584
2585	cfg.flags = 0;
2586	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2587		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2588	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
2589			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
2590
2591	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2592}
2593
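/* Program the per-port time sync registers from the current CPTS
 * rx/tx state: disable time sync when neither direction is enabled,
 * otherwise enable event message annexes and LTYPE/sequence-id
 * matching according to the slave's ts_ctl settings.
 */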
2594static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2595{
2596	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2597	struct gbe_slave *slave = gbe_intf->slave;
2598	u32 ts_en, seq_id, ctl;
2599
2600	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
2601	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
2602		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2603		return;
2604	}
2605
2606	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2607	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2608	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2609		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2610		(slave->ts_ctl.uni ?  TS_UNI_EN :
2611			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2612
2613	if (cpts_is_tx_enabled(gbe_dev->cpts))
2614		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2615
2616	if (cpts_is_rx_enabled(gbe_dev->cpts))
2617		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2618
2619	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2620	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2621	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2622}
2623
2624static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2625{
2626	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2627	struct cpts *cpts = gbe_dev->cpts;
2628	struct hwtstamp_config cfg;
2629
2630	if (!cpts)
2631		return -EOPNOTSUPP;
2632
2633	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2634		return -EFAULT;
2635
2636	/* reserved for future extensions */
2637	if (cfg.flags)
2638		return -EINVAL;
2639
2640	switch (cfg.tx_type) {
2641	case HWTSTAMP_TX_OFF:
2642		cpts_tx_enable(cpts, 0);
2643		break;
2644	case HWTSTAMP_TX_ON:
2645		cpts_tx_enable(cpts, 1);
2646		break;
2647	default:
2648		return -ERANGE;
2649	}
2650
2651	switch (cfg.rx_filter) {
2652	case HWTSTAMP_FILTER_NONE:
2653		cpts_rx_enable(cpts, 0);
2654		break;
2655	case HWTSTAMP_FILTER_ALL:
2656	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2657	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2658	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2659		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2660		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2661		break;
2662	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2663	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2664	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2665	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2666	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2667	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2668	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2669	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2670	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2671		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
2672		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2673		break;
2674	default:
2675		return -ERANGE;
2676	}
2677
2678	gbe_hwtstamp(gbe_intf);
2679
2680	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2681}
2682
2683static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2684{
2685	if (!gbe_dev->cpts)
2686		return;
2687
2688	if (gbe_dev->cpts_registered > 0)
2689		goto done;
2690
2691	if (cpts_register(gbe_dev->cpts)) {
2692		dev_err(gbe_dev->dev, "error registering cpts device\n");
2693		return;
2694	}
2695
2696done:
2697	++gbe_dev->cpts_registered;
2698}
2699
2700static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2701{
2702	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2703		return;
2704
2705	if (--gbe_dev->cpts_registered)
2706		return;
2707
2708	cpts_unregister(gbe_dev->cpts);
2709}
2710#else
2711static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2712					struct netcp_packet *p_info)
2713{
2714	return 0;
2715}
2716
2717static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2718			       struct netcp_packet *p_info)
2719{
2720	return 0;
2721}
2722
2723static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2724			       struct ifreq *ifr, int cmd)
2725{
2726	return -EOPNOTSUPP;
2727}
2728
2729static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2730{
2731}
2732
2733static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2734{
2735}
2736
2737static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2738{
2739	return -EOPNOTSUPP;
2740}
2741
2742static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2743{
2744	return -EOPNOTSUPP;
2745}
2746#endif /* CONFIG_TI_CPTS */
2747
2748static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2749{
2750	struct gbe_intf *gbe_intf = intf_priv;
2751	struct phy_device *phy = gbe_intf->slave->phy;
2752
2753	if (!phy || !phy->drv->hwtstamp) {
2754		switch (cmd) {
2755		case SIOCGHWTSTAMP:
2756			return gbe_hwtstamp_get(gbe_intf, req);
2757		case SIOCSHWTSTAMP:
2758			return gbe_hwtstamp_set(gbe_intf, req);
2759		}
2760	}
2761
2762	if (phy)
2763		return phy_mii_ioctl(phy, req, cmd);
2764
2765	return -EOPNOTSUPP;
2766}
2767
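/* Periodic housekeeping: refresh the link state of primary and
 * secondary slaves and fold the hardware statistics into the software
 * accumulators before the 32-bit counters can wrap, then re-arm.
 */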
2768static void netcp_ethss_timer(unsigned long arg)
2769{
2770	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2771	struct gbe_intf *gbe_intf;
2772	struct gbe_slave *slave;
2773
2774	/* Check & update SGMII link state of interfaces */
2775	for_each_intf(gbe_intf, gbe_dev) {
2776		if (!gbe_intf->slave->open)
2777			continue;
2778		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2779					      gbe_intf->ndev);
2780	}
2781
2782	/* Check & update SGMII link state of secondary ports */
2783	for_each_sec_slave(slave, gbe_dev) {
2784		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2785	}
2786
2787	/* Timer callbacks run in BH context, so spin_lock() without _bh is sufficient here */
2788	spin_lock(&gbe_dev->hw_stats_lock);
2789
2790	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2791		gbe_update_stats_ver14(gbe_dev, NULL);
2792	else
2793		gbe_update_stats(gbe_dev, NULL);
2794
2795	spin_unlock(&gbe_dev->hw_stats_lock);
2796
2797	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2798	add_timer(&gbe_dev->timer);
2799}
2800
2801static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2802{
2803	struct gbe_intf *gbe_intf = data;
2804
2805	p_info->tx_pipe = &gbe_intf->tx_pipe;
2806
2807	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2808}
2809
2810static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2811{
2812	struct gbe_intf *gbe_intf = data;
2813
2814	return gbe_rxtstamp(gbe_intf, p_info);
2815}
2816
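/* netcp interface open hook: select the Tx pipe destination port,
 * program the switch control/ptype/statistics registers, open the
 * slave port and register the tx/rx hooks used for timestamping.
 */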
2817static int gbe_open(void *intf_priv, struct net_device *ndev)
2818{
2819	struct gbe_intf *gbe_intf = intf_priv;
2820	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2821	struct netcp_intf *netcp = netdev_priv(ndev);
2822	struct gbe_slave *slave = gbe_intf->slave;
2823	int port_num = slave->port_num;
2824	u32 reg;
2825	int ret;
2826
2827	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2828	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2829		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2830		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2831
2832	/* For 10G and NetCP 1.5, select the egress port via the packet tag info */
2833	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2834		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2835
2836	if (gbe_dev->enable_ale)
2837		gbe_intf->tx_pipe.switch_to_port = 0;
2838	else
2839		gbe_intf->tx_pipe.switch_to_port = port_num;
2840
2841	dev_dbg(gbe_dev->dev,
2842		"opened TX channel %s: %p with to port %d, flags %d\n",
2843		gbe_intf->tx_pipe.dma_chan_name,
2844		gbe_intf->tx_pipe.dma_channel,
2845		gbe_intf->tx_pipe.switch_to_port,
2846		gbe_intf->tx_pipe.flags);
2847
2848	gbe_slave_stop(gbe_intf);
2849
2850	/* disable priority elevation and enable statistics on all ports */
2851	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2852
2853	/* Control register */
2854	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2855
2856	/* All statistics enabled and STAT AB visible by default */
2857	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2858						    stat_port_en));
2859
2860	ret = gbe_slave_open(gbe_intf);
2861	if (ret)
2862		goto fail;
2863
2864	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2865	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2866
2867	slave->open = true;
2868	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2869
2870	gbe_register_cpts(gbe_dev);
2871
2872	return 0;
2873
2874fail:
2875	gbe_slave_stop(gbe_intf);
2876	return ret;
2877}
2878
2879static int gbe_close(void *intf_priv, struct net_device *ndev)
2880{
2881	struct gbe_intf *gbe_intf = intf_priv;
2882	struct netcp_intf *netcp = netdev_priv(ndev);
2883	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2884
2885	gbe_unregister_cpts(gbe_dev);
2886
2887	gbe_slave_stop(gbe_intf);
2888
2889	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2890	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2891
2892	gbe_intf->slave->open = false;
2893	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2894	return 0;
2895}
2896
2897#if IS_ENABLED(CONFIG_TI_CPTS)
2898static void init_slave_ts_ctl(struct gbe_slave *slave)
2899{
2900	slave->ts_ctl.uni = 1;
2901	slave->ts_ctl.dst_port_map =
2902		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2903	slave->ts_ctl.maddr_map =
2904		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2905}
2906
2907#else
2908static void init_slave_ts_ctl(struct gbe_slave *slave)
2909{
2910}
2911#endif /* CONFIG_TI_CPTS */
2912
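/* Parse one slave port device tree node and derive the port and EMAC
 * register block addresses and offsets, which differ between GBE 1.4,
 * GBENU (NU/2U) and XGBE subsystems.
 */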
2913static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2914		      struct device_node *node)
2915{
2916	int port_reg_num;
2917	u32 port_reg_ofs, emac_reg_ofs;
2918	u32 port_reg_blk_sz, emac_reg_blk_sz;
2919
2920	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2921		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2922		return -EINVAL;
2923	}
2924
2925	if (of_property_read_u32(node, "link-interface",
2926				 &slave->link_interface)) {
2927		dev_warn(gbe_dev->dev,
2928			 "missing link-interface value defaulting to 1G mac-phy link\n");
2929		slave->link_interface = SGMII_LINK_MAC_PHY;
2930	}
2931
2932	slave->open = false;
2933	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2934	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2935
2936	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2937		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2938	else
2939		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2940
2941	/* The EMAC register blocks are contiguous, but the port register blocks are not */
2942	port_reg_num = slave->slave_num;
2943	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2944		if (slave->slave_num > 1) {
2945			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2946			port_reg_num -= 2;
2947		} else {
2948			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2949		}
2950		emac_reg_ofs = GBE13_EMAC_OFFSET;
2951		port_reg_blk_sz = 0x30;
2952		emac_reg_blk_sz = 0x40;
2953	} else if (IS_SS_ID_MU(gbe_dev)) {
2954		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2955		emac_reg_ofs = GBENU_EMAC_OFFSET;
2956		port_reg_blk_sz = 0x1000;
2957		emac_reg_blk_sz = 0x1000;
2958	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2959		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2960		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2961		port_reg_blk_sz = 0x30;
2962		emac_reg_blk_sz = 0x40;
2963	} else {
2964		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2965			gbe_dev->ss_version);
2966		return -EINVAL;
2967	}
2968
2969	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2970				(port_reg_blk_sz * port_reg_num);
2971	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2972				(emac_reg_blk_sz * slave->slave_num);
2973
2974	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2975		/* Initialize  slave port register offsets */
2976		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2977		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2978		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2979		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2980		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2981		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2982		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2983		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2984		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2985
2986		/* Initialize EMAC register offsets */
2987		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2988		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2989		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2990
2991	} else if (IS_SS_ID_MU(gbe_dev)) {
2992		/* Initialize  slave port register offsets */
2993		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2994		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2995		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2996		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2997		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2998		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2999		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3000		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3001		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3002		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3003
3004		/* Initialize EMAC register offsets */
3005		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3006		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3007
3008	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
3009		/* Initialize  slave port register offsets */
3010		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3011		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3012		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3013		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3014		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3015		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3016		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3017		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3018		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3019
3020		/* Initialize EMAC register offsets */
3021		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3022		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3023		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3024	}
3025
3026	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3027
3028	init_slave_ts_ctl(slave);
3029	return 0;
3030}
3031
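/* Set up slave ports that are not exposed as network interfaces:
 * configure and start them at probe time and, for MAC-to-PHY links,
 * attach their PHYs to a dummy netdev so link state is still tracked.
 */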
3032static void init_secondary_ports(struct gbe_priv *gbe_dev,
3033				 struct device_node *node)
3034{
3035	struct device *dev = gbe_dev->dev;
3036	phy_interface_t phy_mode;
3037	struct gbe_priv **priv;
3038	struct device_node *port;
3039	struct gbe_slave *slave;
3040	bool mac_phy_link = false;
3041
3042	for_each_child_of_node(node, port) {
3043		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3044		if (!slave) {
3045			dev_err(dev,
3046				"memory alloc failed for secondary port(%s), skipping...\n",
3047				port->name);
3048			continue;
3049		}
3050
3051		if (init_slave(gbe_dev, slave, port)) {
3052			dev_err(dev,
3053				"Failed to initialize secondary port(%s), skipping...\n",
3054				port->name);
3055			devm_kfree(dev, slave);
3056			continue;
3057		}
3058
3059		gbe_sgmii_config(gbe_dev, slave);
3060		gbe_port_reset(slave);
3061		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3062		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3063		gbe_dev->num_slaves++;
3064		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3065		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3066			mac_phy_link = true;
3067
3068		slave->open = true;
3069		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3070			of_node_put(port);
3071			break;
3072		}
3073	}
3074
3075	/* of_phy_connect() is needed only for MAC-PHY interface */
3076	if (!mac_phy_link)
3077		return;
3078
3079	/* Allocate dummy netdev device for attaching to phy device */
3080	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3081					NET_NAME_UNKNOWN, ether_setup);
3082	if (!gbe_dev->dummy_ndev) {
3083		dev_err(dev,
3084			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3085		return;
3086	}
3087	priv = netdev_priv(gbe_dev->dummy_ndev);
3088	*priv = gbe_dev;
3089
3090	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3091		phy_mode = PHY_INTERFACE_MODE_SGMII;
3092		slave->phy_port_t = PORT_MII;
3093	} else {
3094		phy_mode = PHY_INTERFACE_MODE_NA;
3095		slave->phy_port_t = PORT_FIBRE;
3096	}
3097
3098	for_each_sec_slave(slave, gbe_dev) {
3099		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3100		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3101			continue;
3102		slave->phy =
3103			of_phy_connect(gbe_dev->dummy_ndev,
3104				       slave->phy_node,
3105				       gbe_adjust_link_sec_slaves,
3106				       0, phy_mode);
3107		if (!slave->phy) {
3108			dev_err(dev, "phy not found for slave %d\n",
3109				slave->slave_num);
3110			slave->phy = NULL;
3111		} else {
3112			dev_dbg(dev, "phy found: id is: 0x%s\n",
3113				phydev_name(slave->phy));
3114			phy_start(slave->phy);
3115			phy_read_status(slave->phy);
3116		}
3117	}
3118}
3119
3120static void free_secondary_ports(struct gbe_priv *gbe_dev)
3121{
3122	struct gbe_slave *slave;
3123
3124	while (!list_empty(&gbe_dev->secondary_slaves)) {
3125		slave = first_sec_slave(gbe_dev);
3126
3127		if (slave->phy)
3128			phy_disconnect(slave->phy);
3129		list_del(&slave->slave_list);
3130	}
3131	if (gbe_dev->dummy_ndev)
3132		free_netdev(gbe_dev->dummy_ndev);
3133}
3134
3135static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3136				 struct device_node *node)
3137{
3138	struct resource res;
3139	void __iomem *regs;
3140	int ret, i;
3141
3142	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3143	if (ret) {
3144		dev_err(gbe_dev->dev,
3145			"Can't xlate xgbe of node(%s) ss address at %d\n",
3146			node->name, XGBE_SS_REG_INDEX);
3147		return ret;
3148	}
3149
3150	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3151	if (IS_ERR(regs)) {
3152		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3153		return PTR_ERR(regs);
3154	}
3155	gbe_dev->ss_regs = regs;
3156
3157	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3158	if (ret) {
3159		dev_err(gbe_dev->dev,
3160			"Can't xlate xgbe of node(%s) sm address at %d\n",
3161			node->name, XGBE_SM_REG_INDEX);
3162		return ret;
3163	}
3164
3165	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3166	if (IS_ERR(regs)) {
3167		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3168		return PTR_ERR(regs);
3169	}
3170	gbe_dev->switch_regs = regs;
3171
3172	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3173	if (ret) {
3174		dev_err(gbe_dev->dev,
3175			"Can't xlate xgbe serdes of node(%s) address at %d\n",
3176			node->name, XGBE_SERDES_REG_INDEX);
3177		return ret;
3178	}
3179
3180	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3181	if (IS_ERR(regs)) {
3182		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3183		return PTR_ERR(regs);
3184	}
3185	gbe_dev->xgbe_serdes_regs = regs;
3186
3187	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3188	gbe_dev->et_stats = xgbe10_et_stats;
3189	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3190
3191	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3192					 gbe_dev->num_et_stats * sizeof(u64),
3193					 GFP_KERNEL);
3194	if (!gbe_dev->hw_stats) {
3195		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3196		return -ENOMEM;
3197	}
3198
3199	gbe_dev->hw_stats_prev =
3200		devm_kzalloc(gbe_dev->dev,
3201			     gbe_dev->num_et_stats * sizeof(u32),
3202			     GFP_KERNEL);
3203	if (!gbe_dev->hw_stats_prev) {
3204		dev_err(gbe_dev->dev,
3205			"hw_stats_prev memory allocation failed\n");
3206		return -ENOMEM;
3207	}
3208
3209	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3210	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3211					XGBE10_SGMII_MODULE_OFFSET;
3212	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3213
3214	for (i = 0; i < gbe_dev->max_num_ports; i++)
3215		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3216			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3217
3218	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3219	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3220	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3221	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3222	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3223	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3224
3225	/* Subsystem registers */
3226	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3227	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3228
3229	/* Switch module registers */
3230	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3231	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3232	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3233	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3234	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3235
3236	/* Host port registers */
3237	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3238	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3239	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3240	return 0;
3241}
3242
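/* Map the GBE subsystem registers and read the hardware ID/version word so
 * gbe_probe() can select the matching version-specific setup routine.
 */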
3243static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3244				    struct device_node *node)
3245{
3246	struct resource res;
3247	void __iomem *regs;
3248	int ret;
3249
3250	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3251	if (ret) {
3252		dev_err(gbe_dev->dev,
3253			"Can't translate of gbe node(%s) ss address at index %d\n",
3254			node->name, GBE_SS_REG_INDEX);
3255		return ret;
3256	}
3257
3258	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3259	if (IS_ERR(regs)) {
3260		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3261		return PTR_ERR(regs);
3262	}
3263	gbe_dev->ss_regs = regs;
3264	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3265	return 0;
3266}
3267
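/* Version 1.4 (GBE13, e.g. K2HK) setup: map the SGMII port 3/4 and
 * switch-module regions, allocate the statistics buffers and derive the
 * per-module register pointers from the GBE13_* offsets.
 */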
3268static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3269				struct device_node *node)
3270{
3271	struct resource res;
3272	void __iomem *regs;
3273	int i, ret;
3274
3275	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3276	if (ret) {
3277		dev_err(gbe_dev->dev,
3278			"Can't translate of gbe node(%s) address at index %d\n",
3279			node->name, GBE_SGMII34_REG_INDEX);
3280		return ret;
3281	}
3282
3283	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3284	if (IS_ERR(regs)) {
3285		dev_err(gbe_dev->dev,
3286			"Failed to map gbe sgmii port34 register base\n");
3287		return PTR_ERR(regs);
3288	}
3289	gbe_dev->sgmii_port34_regs = regs;
3290
3291	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3292	if (ret) {
3293		dev_err(gbe_dev->dev,
3294			"Can't translate of gbe node(%s) address at index %d\n",
3295			node->name, GBE_SM_REG_INDEX);
3296		return ret;
3297	}
3298
3299	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3300	if (IS_ERR(regs)) {
3301		dev_err(gbe_dev->dev,
3302			"Failed to map gbe switch module register base\n");
3303		return PTR_ERR(regs);
3304	}
3305	gbe_dev->switch_regs = regs;
3306
3307	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3308	gbe_dev->et_stats = gbe13_et_stats;
3309	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3310
3311	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3312					 gbe_dev->num_et_stats * sizeof(u64),
3313					 GFP_KERNEL);
3314	if (!gbe_dev->hw_stats) {
3315		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3316		return -ENOMEM;
3317	}
3318
3319	gbe_dev->hw_stats_prev =
3320		devm_kzalloc(gbe_dev->dev,
3321			     gbe_dev->num_et_stats * sizeof(u32),
3322			     GFP_KERNEL);
3323	if (!gbe_dev->hw_stats_prev) {
3324		dev_err(gbe_dev->dev,
3325			"hw_stats_prev memory allocation failed\n");
3326		return -ENOMEM;
3327	}
3328
3329	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3330	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3331
3332	/* K2HK has only 2 hw stats modules visible at a time, so
3333	 * modules 0 & 2 point to one base and
3334	 * modules 1 & 3 point to the other base
3335	 */
3336	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3337		gbe_dev->hw_stats_regs[i] =
3338			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3339			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3340	}
3341
3342	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3343	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3344	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3345	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3346	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3347	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3348
3349	/* Subsystem registers */
3350	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3351
3352	/* Switch module registers */
3353	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3354	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3355	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3356	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3357	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3358	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3359
3360	/* Host port registers */
3361	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3362	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3363	return 0;
3364}
3365
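/* NU/2U setup: the number of ethtool statistics depends on whether this is
 * a multi-slave NU or a single-slave 2U device; register pointers are then
 * derived from the GBENU_* offsets relative to the switch-module region
 * mapped here.
 */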
3366static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3367				struct device_node *node)
3368{
3369	struct resource res;
3370	void __iomem *regs;
3371	int i, ret;
3372
3373	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3374	gbe_dev->et_stats = gbenu_et_stats;
3375
3376	if (IS_SS_ID_NU(gbe_dev))
3377		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3378			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3379	else
3380		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3381					GBENU_ET_STATS_PORT_SIZE;
3382
3383	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3384					 gbe_dev->num_et_stats * sizeof(u64),
3385					 GFP_KERNEL);
3386	if (!gbe_dev->hw_stats) {
3387		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3388		return -ENOMEM;
3389	}
3390
3391	gbe_dev->hw_stats_prev =
3392		devm_kzalloc(gbe_dev->dev,
3393			     gbe_dev->num_et_stats * sizeof(u32),
3394			     GFP_KERNEL);
3395	if (!gbe_dev->hw_stats_prev) {
3396		dev_err(gbe_dev->dev,
3397			"hw_stats_prev memory allocation failed\n");
3398		return -ENOMEM;
3399	}
3400
3401	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3402	if (ret) {
3403		dev_err(gbe_dev->dev,
3404			"Can't translate of gbenu node(%s) addr at index %d\n",
3405			node->name, GBENU_SM_REG_INDEX);
3406		return ret;
3407	}
3408
3409	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3410	if (IS_ERR(regs)) {
3411		dev_err(gbe_dev->dev,
3412			"Failed to map gbenu switch module register base\n");
3413		return PTR_ERR(regs);
3414	}
3415	gbe_dev->switch_regs = regs;
3416
3417	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3418
3419	/* Although sgmii modules are mem mapped to one contiguous
3420	 * region on GBENU devices, setting sgmii_port34_regs allows
3421	 * consistent code when accessing sgmii api
3422	 */
3423	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3424				     (2 * GBENU_SGMII_MODULE_SIZE);
3425
3426	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3427
3428	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3429		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3430			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3431
3432	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3433	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3434	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3435	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3436	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3437	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3438
3439	/* Subsystem registers */
3440	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3441
3442	/* Switch module registers */
3443	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3444	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3445	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3446	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3447
3448	/* Host port registers */
3449	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3450	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3451
3452	/* For NU only.  2U does not need tx_pri_map.
3453	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3454	 * while 2U has only 1 such thread
3455	 */
3456	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3457	return 0;
3458}
3459
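/* Probe one ethss instance: size the slave set from the compatible string,
 * map the version-specific register regions, set up the TX pipe, scan the
 * "interfaces" and "secondary-slave-ports" child nodes, then create the
 * ALE, CPTS and the periodic housekeeping timer.
 *
 * An illustrative device-tree fragment (hypothetical node name and values,
 * showing only the properties this function reads):
 *
 *	gbe: gbe@90000 {
 *		compatible = "ti,netcp-gbe";
 *		tx-channel = "nettx";
 *		tx-queue = <648>;
 *		interfaces {
 *			gbe0: interface-0 {
 *				slave-port = <0>;
 *			};
 *		};
 *		secondary-slave-ports {
 *			(per-port child nodes handled by init_secondary_ports())
 *		};
 *	};
 */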
3460static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3461		     struct device_node *node, void **inst_priv)
3462{
3463	struct device_node *interfaces, *interface;
3464	struct device_node *secondary_ports;
3465	struct cpsw_ale_params ale_params;
3466	struct gbe_priv *gbe_dev;
3467	u32 slave_num;
3468	int i, ret = 0;
3469
3470	if (!node) {
3471		dev_err(dev, "device tree info unavailable\n");
3472		return -ENODEV;
3473	}
3474
3475	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3476	if (!gbe_dev)
3477		return -ENOMEM;
3478
3479	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3480	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3481		gbe_dev->max_num_slaves = 4;
3482	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3483		gbe_dev->max_num_slaves = 8;
3484	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3485		gbe_dev->max_num_slaves = 1;
3486	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3487		gbe_dev->max_num_slaves = 2;
3488	} else {
3489		dev_err(dev, "device tree node for unknown device\n");
3490		return -EINVAL;
3491	}
3492	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3493
3494	gbe_dev->dev = dev;
3495	gbe_dev->netcp_device = netcp_device;
3496	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3497
3498	/* init the hw stats lock */
3499	spin_lock_init(&gbe_dev->hw_stats_lock);
3500
3501	if (of_find_property(node, "enable-ale", NULL)) {
3502		gbe_dev->enable_ale = true;
3503		dev_info(dev, "ALE enabled\n");
3504	} else {
3505		gbe_dev->enable_ale = false;
3506		dev_dbg(dev, "ALE bypass enabled\n");
3507	}
3508
3509	ret = of_property_read_u32(node, "tx-queue",
3510				   &gbe_dev->tx_queue_id);
3511	if (ret < 0) {
3512		dev_err(dev, "missing \"tx-queue\" parameter\n");
3513		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3514	}
3515
3516	ret = of_property_read_string(node, "tx-channel",
3517				      &gbe_dev->dma_chan_name);
3518	if (ret < 0) {
3519		dev_err(dev, "missing \"tx-channel\" parameter\n");
3520		return -EINVAL;
3521	}
3522
3523	if (!strcmp(node->name, "gbe")) {
3524		ret = get_gbe_resource_version(gbe_dev, node);
3525		if (ret)
3526			return ret;
3527
3528		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3529
3530		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3531			ret = set_gbe_ethss14_priv(gbe_dev, node);
3532		else if (IS_SS_ID_MU(gbe_dev))
3533			ret = set_gbenu_ethss_priv(gbe_dev, node);
3534		else
3535			ret = -ENODEV;
3536
3537	} else if (!strcmp(node->name, "xgbe")) {
3538		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3539		if (ret)
3540			return ret;
3541		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3542					     gbe_dev->ss_regs);
3543	} else {
3544		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3545		ret = -ENODEV;
3546	}
3547
3548	if (ret)
3549		return ret;
3550
3551	interfaces = of_get_child_by_name(node, "interfaces");
3552	if (!interfaces)
3553		dev_err(dev, "could not find interfaces\n");
3554
3555	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3556				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3557	if (ret)
3558		return ret;
3559
3560	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3561	if (ret)
3562		return ret;
3563
3564	/* Create network interfaces */
3565	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3566	for_each_child_of_node(interfaces, interface) {
3567		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3568		if (ret) {
3569			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3570				interface->name);
3571			continue;
3572		}
3573		gbe_dev->num_slaves++;
3574		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3575			of_node_put(interface);
3576			break;
3577		}
3578	}
3579	of_node_put(interfaces);
3580
3581	if (!gbe_dev->num_slaves)
3582		dev_warn(dev, "No network interface configured\n");
3583
3584	/* Initialize Secondary slave ports */
3585	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3586	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3587	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
3588		init_secondary_ports(gbe_dev, secondary_ports);
3589	of_node_put(secondary_ports);
3590
3591	if (!gbe_dev->num_slaves) {
3592		dev_err(dev,
3593			"No network interface or secondary ports configured\n");
3594		ret = -ENODEV;
3595		goto free_sec_ports;
3596	}
3597
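	/* Create the address lookup engine shared by the host and slave ports */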
3598	memset(&ale_params, 0, sizeof(ale_params));
3599	ale_params.dev		= gbe_dev->dev;
3600	ale_params.ale_regs	= gbe_dev->ale_reg;
3601	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3602	ale_params.ale_entries	= gbe_dev->ale_entries;
3603	ale_params.ale_ports	= gbe_dev->ale_ports;
3604
3605	gbe_dev->ale = cpsw_ale_create(&ale_params);
3606	if (!gbe_dev->ale) {
3607		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3608		ret = -ENODEV;
3609		goto free_sec_ports;
3610	} else {
3611		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3612	}
3613
3614	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
3615	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3616		ret = PTR_ERR(gbe_dev->cpts);
3617		goto free_sec_ports;
3618	}
3619
3620	/* initialize host port */
3621	gbe_init_host_port(gbe_dev);
3622
3623	spin_lock_bh(&gbe_dev->hw_stats_lock);
3624	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3625		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3626			gbe_reset_mod_stats_ver14(gbe_dev, i);
3627		else
3628			gbe_reset_mod_stats(gbe_dev, i);
3629	}
3630	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3631
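	/* Arm the periodic timer that drives netcp_ethss_timer() for
	 * statistics and link housekeeping.
	 */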
3632	init_timer(&gbe_dev->timer);
3633	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
3634	gbe_dev->timer.function = netcp_ethss_timer;
3635	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3636	add_timer(&gbe_dev->timer);
3637	*inst_priv = gbe_dev;
3638	return 0;
3639
3640free_sec_ports:
3641	free_secondary_ports(gbe_dev);
3642	return ret;
3643}
3644
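/* Called by the netcp core when a network interface binds to this module:
 * allocate the per-interface and per-slave state, initialise the slave from
 * its device-tree node and install the Keystone ethtool ops on the netdev.
 */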
3645static int gbe_attach(void *inst_priv, struct net_device *ndev,
3646		      struct device_node *node, void **intf_priv)
3647{
3648	struct gbe_priv *gbe_dev = inst_priv;
3649	struct gbe_intf *gbe_intf;
3650	int ret;
3651
3652	if (!node) {
3653		dev_err(gbe_dev->dev, "interface node not available\n");
3654		return -ENODEV;
3655	}
3656
3657	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3658	if (!gbe_intf)
3659		return -ENOMEM;
3660
3661	gbe_intf->ndev = ndev;
3662	gbe_intf->dev = gbe_dev->dev;
3663	gbe_intf->gbe_dev = gbe_dev;
3664
3665	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3666					sizeof(*gbe_intf->slave),
3667					GFP_KERNEL);
3668	if (!gbe_intf->slave) {
3669		ret = -ENOMEM;
3670		goto fail;
3671	}
3672
3673	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3674		ret = -ENODEV;
3675		goto fail;
3676	}
3677
3678	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3679	ndev->ethtool_ops = &keystone_ethtool_ops;
3680	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3681	*intf_priv = gbe_intf;
3682	return 0;
3683
3684fail:
3685	if (gbe_intf->slave)
3686		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3687	if (gbe_intf)
3688		devm_kfree(gbe_dev->dev, gbe_intf);
3689	return ret;
3690}
3691
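/* Undo gbe_attach() for one interface */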
3692static int gbe_release(void *intf_priv)
3693{
3694	struct gbe_intf *gbe_intf = intf_priv;
3695
3696	gbe_intf->ndev->ethtool_ops = NULL;
3697	list_del(&gbe_intf->gbe_intf_list);
3698	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3699	devm_kfree(gbe_intf->dev, gbe_intf);
3700	return 0;
3701}
3702
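/* Tear down a whole ethss instance: stop the timer, release CPTS, stop and
 * destroy the ALE, close the TX pipe and free any secondary ports.
 */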
3703static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3704{
3705	struct gbe_priv *gbe_dev = inst_priv;
3706
3707	del_timer_sync(&gbe_dev->timer);
3708	cpts_release(gbe_dev->cpts);
3709	cpsw_ale_stop(gbe_dev->ale);
3710	cpsw_ale_destroy(gbe_dev->ale);
3711	netcp_txpipe_close(&gbe_dev->tx_pipe);
3712	free_secondary_ports(gbe_dev);
3713
3714	if (!list_empty(&gbe_dev->gbe_intf_head))
3715		dev_alert(gbe_dev->dev,
3716			  "unreleased ethss interfaces present\n");
3717
3718	return 0;
3719}
3720
3721static struct netcp_module gbe_module = {
3722	.name		= GBE_MODULE_NAME,
3723	.owner		= THIS_MODULE,
3724	.primary	= true,
3725	.probe		= gbe_probe,
3726	.open		= gbe_open,
3727	.close		= gbe_close,
3728	.remove		= gbe_remove,
3729	.attach		= gbe_attach,
3730	.release	= gbe_release,
3731	.add_addr	= gbe_add_addr,
3732	.del_addr	= gbe_del_addr,
3733	.add_vid	= gbe_add_vid,
3734	.del_vid	= gbe_del_vid,
3735	.ioctl		= gbe_ioctl,
3736};
3737
3738static struct netcp_module xgbe_module = {
3739	.name		= XGBE_MODULE_NAME,
3740	.owner		= THIS_MODULE,
3741	.primary	= true,
3742	.probe		= gbe_probe,
3743	.open		= gbe_open,
3744	.close		= gbe_close,
3745	.remove		= gbe_remove,
3746	.attach		= gbe_attach,
3747	.release	= gbe_release,
3748	.add_addr	= gbe_add_addr,
3749	.del_addr	= gbe_del_addr,
3750	.add_vid	= gbe_add_vid,
3751	.del_vid	= gbe_del_vid,
3752	.ioctl		= gbe_ioctl,
3753};
3754
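/* Register both the 1G (gbe) and 10G (xgbe) variants with the netcp core;
 * the core then calls gbe_probe() for each matching device-tree node.
 */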
3755static int __init keystone_gbe_init(void)
3756{
3757	int ret;
3758
3759	ret = netcp_register_module(&gbe_module);
3760	if (ret)
3761		return ret;
3762
3763	ret = netcp_register_module(&xgbe_module);
3764	if (ret) {
		/* don't leave the gbe module registered if xgbe failed */
		netcp_unregister_module(&gbe_module);
3765		return ret;
	}
3766
3767	return 0;
3768}
3769module_init(keystone_gbe_init);
3770
3771static void __exit keystone_gbe_exit(void)
3772{
3773	netcp_unregister_module(&gbe_module);
3774	netcp_unregister_module(&xgbe_module);
3775}
3776module_exit(keystone_gbe_exit);
3777
3778MODULE_LICENSE("GPL v2");
3779MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3780MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");