Linux Audio

Check our new training course

Loading...
v4.17
   1/*
   2 * Keystone GBE and XGBE subsystem code
   3 *
   4 * Copyright (C) 2014 Texas Instruments Incorporated
   5 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   6 *		Sandeep Paulraj <s-paulraj@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *		Wingman Kwok <w-kwok2@ti.com>
  10 *
  11 * This program is free software; you can redistribute it and/or
  12 * modify it under the terms of the GNU General Public License as
  13 * published by the Free Software Foundation version 2.
  14 *
  15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  16 * kind, whether express or implied; without even the implied warranty
  17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 */
  20
  21#include <linux/io.h>
  22#include <linux/module.h>
  23#include <linux/of_mdio.h>
  24#include <linux/of_address.h>
  25#include <linux/if_vlan.h>
  26#include <linux/ptp_classify.h>
  27#include <linux/net_tstamp.h>
  28#include <linux/ethtool.h>
  29
  30#include "cpsw.h"
  31#include "cpsw_ale.h"
  32#include "netcp.h"
  33#include "cpts.h"
  34
  35#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
  36#define NETCP_DRIVER_VERSION		"v1.0"
  37
  38#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
  39#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
  40#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
  41#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
  42
  43/* 1G Ethernet SS defines */
  44#define GBE_MODULE_NAME			"netcp-gbe"
  45#define GBE_SS_VERSION_14		0x4ed21104
  46
  47#define GBE_SS_REG_INDEX		0
  48#define GBE_SGMII34_REG_INDEX		1
  49#define GBE_SM_REG_INDEX		2
  50/* offset relative to base of GBE_SS_REG_INDEX */
  51#define GBE13_SGMII_MODULE_OFFSET	0x100
  52/* offset relative to base of GBE_SM_REG_INDEX */
  53#define GBE13_HOST_PORT_OFFSET		0x34
  54#define GBE13_SLAVE_PORT_OFFSET		0x60
  55#define GBE13_EMAC_OFFSET		0x100
  56#define GBE13_SLAVE_PORT2_OFFSET	0x200
  57#define GBE13_HW_STATS_OFFSET		0x300
  58#define GBE13_CPTS_OFFSET		0x500
  59#define GBE13_ALE_OFFSET		0x600
  60#define GBE13_HOST_PORT_NUM		0
  61#define GBE13_NUM_ALE_ENTRIES		1024
  62
  63/* 1G Ethernet NU SS defines */
  64#define GBENU_MODULE_NAME		"netcp-gbenu"
  65#define GBE_SS_ID_NU			0x4ee6
  66#define GBE_SS_ID_2U			0x4ee8
  67
  68#define IS_SS_ID_MU(d) \
  69	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
  70	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
  71
  72#define IS_SS_ID_NU(d) \
  73	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
  74
  75#define GBENU_SS_REG_INDEX		0
  76#define GBENU_SM_REG_INDEX		1
  77#define GBENU_SGMII_MODULE_OFFSET	0x100
  78#define GBENU_HOST_PORT_OFFSET		0x1000
  79#define GBENU_SLAVE_PORT_OFFSET		0x2000
  80#define GBENU_EMAC_OFFSET		0x2330
  81#define GBENU_HW_STATS_OFFSET		0x1a000
  82#define GBENU_CPTS_OFFSET		0x1d000
  83#define GBENU_ALE_OFFSET		0x1e000
  84#define GBENU_HOST_PORT_NUM		0
 
  85#define GBENU_SGMII_MODULE_SIZE		0x100
  86
  87/* 10G Ethernet SS defines */
  88#define XGBE_MODULE_NAME		"netcp-xgbe"
  89#define XGBE_SS_VERSION_10		0x4ee42100
  90
  91#define XGBE_SS_REG_INDEX		0
  92#define XGBE_SM_REG_INDEX		1
  93#define XGBE_SERDES_REG_INDEX		2
  94
  95/* offset relative to base of XGBE_SS_REG_INDEX */
  96#define XGBE10_SGMII_MODULE_OFFSET	0x100
  97#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
  98/* offset relative to base of XGBE_SM_REG_INDEX */
  99#define XGBE10_HOST_PORT_OFFSET		0x34
 100#define XGBE10_SLAVE_PORT_OFFSET	0x64
 101#define XGBE10_EMAC_OFFSET		0x400
 102#define XGBE10_CPTS_OFFSET		0x600
 103#define XGBE10_ALE_OFFSET		0x700
 104#define XGBE10_HW_STATS_OFFSET		0x800
 105#define XGBE10_HOST_PORT_NUM		0
 106#define XGBE10_NUM_ALE_ENTRIES		2048
 107
 108#define	GBE_TIMER_INTERVAL			(HZ / 2)
 109
 110/* Soft reset register values */
 111#define SOFT_RESET_MASK				BIT(0)
 112#define SOFT_RESET				BIT(0)
 113#define DEVICE_EMACSL_RESET_POLL_COUNT		100
 114#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
 115
 116#define MACSL_RX_ENABLE_CSF			BIT(23)
 117#define MACSL_ENABLE_EXT_CTL			BIT(18)
 118#define MACSL_XGMII_ENABLE			BIT(13)
 119#define MACSL_XGIG_MODE				BIT(8)
 120#define MACSL_GIG_MODE				BIT(7)
 121#define MACSL_GMII_ENABLE			BIT(5)
 122#define MACSL_FULLDUPLEX			BIT(0)
 123
 124#define GBE_CTL_P0_ENABLE			BIT(2)
 125#define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
 126#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
 127#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
 128#define GBE_STATS_CD_SEL			BIT(28)
 129
 130#define GBE_PORT_MASK(x)			(BIT(x) - 1)
 131#define GBE_MASK_NO_PORTS			0
 132
 133#define GBE_DEF_1G_MAC_CONTROL					\
 134		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
 135		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 136
 137#define GBE_DEF_10G_MAC_CONTROL				\
 138		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
 139		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 140
 141#define GBE_STATSA_MODULE			0
 142#define GBE_STATSB_MODULE			1
 143#define GBE_STATSC_MODULE			2
 144#define GBE_STATSD_MODULE			3
 145
 146#define GBENU_STATS0_MODULE			0
 147#define GBENU_STATS1_MODULE			1
 148#define GBENU_STATS2_MODULE			2
 149#define GBENU_STATS3_MODULE			3
 150#define GBENU_STATS4_MODULE			4
 151#define GBENU_STATS5_MODULE			5
 152#define GBENU_STATS6_MODULE			6
 153#define GBENU_STATS7_MODULE			7
 154#define GBENU_STATS8_MODULE			8
 155
 156#define XGBE_STATS0_MODULE			0
 157#define XGBE_STATS1_MODULE			1
 158#define XGBE_STATS2_MODULE			2
 159
 160/* s: 0-based slave_port */
 161#define SGMII_BASE(d, s) \
 162	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
 163
 164#define GBE_TX_QUEUE				648
 165#define	GBE_TXHOOK_ORDER			0
 166#define	GBE_RXHOOK_ORDER			0
 167#define GBE_DEFAULT_ALE_AGEOUT			30
 168#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
 169#define NETCP_LINK_STATE_INVALID		-1
 170
 171#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 172		offsetof(struct gbe##_##rb, rn)
 173#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 174		offsetof(struct gbenu##_##rb, rn)
 175#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 176		offsetof(struct xgbe##_##rb, rn)
 177#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
 178
 179#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
 180
 181#if IS_ENABLED(CONFIG_TI_CPTS)
 182/* Px_TS_CTL register fields */
 183#define TS_RX_ANX_F_EN				BIT(0)
 184#define TS_RX_VLAN_LT1_EN			BIT(1)
 185#define TS_RX_VLAN_LT2_EN			BIT(2)
 186#define TS_RX_ANX_D_EN				BIT(3)
 187#define TS_TX_ANX_F_EN				BIT(4)
 188#define TS_TX_VLAN_LT1_EN			BIT(5)
 189#define TS_TX_VLAN_LT2_EN			BIT(6)
 190#define TS_TX_ANX_D_EN				BIT(7)
 191#define TS_LT2_EN				BIT(8)
 192#define TS_RX_ANX_E_EN				BIT(9)
 193#define TS_TX_ANX_E_EN				BIT(10)
 194#define TS_MSG_TYPE_EN_SHIFT			16
 195#define TS_MSG_TYPE_EN_MASK			0xffff
 196
 197/* Px_TS_SEQ_LTYPE register fields */
 198#define TS_SEQ_ID_OFS_SHIFT			16
 199#define TS_SEQ_ID_OFS_MASK			0x3f
 200
 201/* Px_TS_CTL_LTYPE2 register fields */
 202#define TS_107					BIT(16)
 203#define TS_129					BIT(17)
 204#define TS_130					BIT(18)
 205#define TS_131					BIT(19)
 206#define TS_132					BIT(20)
 207#define TS_319					BIT(21)
 208#define TS_320					BIT(22)
 209#define TS_TTL_NONZERO				BIT(23)
 210#define TS_UNI_EN				BIT(24)
 211#define TS_UNI_EN_SHIFT				24
 212
 213#define TS_TX_ANX_ALL_EN	 \
 214	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
 215
 216#define TS_RX_ANX_ALL_EN	 \
 217	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
 218
 219#define TS_CTL_DST_PORT				TS_319
 220#define TS_CTL_DST_PORT_SHIFT			21
 221
 222#define TS_CTL_MADDR_ALL	\
 223	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
 224
 225#define TS_CTL_MADDR_SHIFT			16
 226
 227/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
 228#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 229#endif /* CONFIG_TI_CPTS */
 230
 231struct xgbe_ss_regs {
 232	u32	id_ver;
 233	u32	synce_count;
 234	u32	synce_mux;
 235	u32	control;
 236};
 237
 238struct xgbe_switch_regs {
 239	u32	id_ver;
 240	u32	control;
 241	u32	emcontrol;
 242	u32	stat_port_en;
 243	u32	ptype;
 244	u32	soft_idle;
 245	u32	thru_rate;
 246	u32	gap_thresh;
 247	u32	tx_start_wds;
 248	u32	flow_control;
 249	u32	cppi_thresh;
 250};
 251
 252struct xgbe_port_regs {
 253	u32	blk_cnt;
 254	u32	port_vlan;
 255	u32	tx_pri_map;
 256	u32	sa_lo;
 257	u32	sa_hi;
 258	u32	ts_ctl;
 259	u32	ts_seq_ltype;
 260	u32	ts_vlan;
 261	u32	ts_ctl_ltype2;
 262	u32	ts_ctl2;
 263	u32	control;
 264};
 265
 266struct xgbe_host_port_regs {
 267	u32	blk_cnt;
 268	u32	port_vlan;
 269	u32	tx_pri_map;
 270	u32	src_id;
 271	u32	rx_pri_map;
 272	u32	rx_maxlen;
 273};
 274
 275struct xgbe_emac_regs {
 276	u32	id_ver;
 277	u32	mac_control;
 278	u32	mac_status;
 279	u32	soft_reset;
 280	u32	rx_maxlen;
 281	u32	__reserved_0;
 282	u32	rx_pause;
 283	u32	tx_pause;
 284	u32	em_control;
 285	u32	__reserved_1;
 286	u32	tx_gap;
 287	u32	rsvd[4];
 288};
 289
 290struct xgbe_host_hw_stats {
 291	u32	rx_good_frames;
 292	u32	rx_broadcast_frames;
 293	u32	rx_multicast_frames;
 294	u32	__rsvd_0[3];
 295	u32	rx_oversized_frames;
 296	u32	__rsvd_1;
 297	u32	rx_undersized_frames;
 298	u32	__rsvd_2;
 299	u32	overrun_type4;
 300	u32	overrun_type5;
 301	u32	rx_bytes;
 302	u32	tx_good_frames;
 303	u32	tx_broadcast_frames;
 304	u32	tx_multicast_frames;
 305	u32	__rsvd_3[9];
 306	u32	tx_bytes;
 307	u32	tx_64byte_frames;
 308	u32	tx_65_to_127byte_frames;
 309	u32	tx_128_to_255byte_frames;
 310	u32	tx_256_to_511byte_frames;
 311	u32	tx_512_to_1023byte_frames;
 312	u32	tx_1024byte_frames;
 313	u32	net_bytes;
 314	u32	rx_sof_overruns;
 315	u32	rx_mof_overruns;
 316	u32	rx_dma_overruns;
 317};
 318
 319struct xgbe_hw_stats {
 320	u32	rx_good_frames;
 321	u32	rx_broadcast_frames;
 322	u32	rx_multicast_frames;
 323	u32	rx_pause_frames;
 324	u32	rx_crc_errors;
 325	u32	rx_align_code_errors;
 326	u32	rx_oversized_frames;
 327	u32	rx_jabber_frames;
 328	u32	rx_undersized_frames;
 329	u32	rx_fragments;
 330	u32	overrun_type4;
 331	u32	overrun_type5;
 332	u32	rx_bytes;
 333	u32	tx_good_frames;
 334	u32	tx_broadcast_frames;
 335	u32	tx_multicast_frames;
 336	u32	tx_pause_frames;
 337	u32	tx_deferred_frames;
 338	u32	tx_collision_frames;
 339	u32	tx_single_coll_frames;
 340	u32	tx_mult_coll_frames;
 341	u32	tx_excessive_collisions;
 342	u32	tx_late_collisions;
 343	u32	tx_underrun;
 344	u32	tx_carrier_sense_errors;
 345	u32	tx_bytes;
 346	u32	tx_64byte_frames;
 347	u32	tx_65_to_127byte_frames;
 348	u32	tx_128_to_255byte_frames;
 349	u32	tx_256_to_511byte_frames;
 350	u32	tx_512_to_1023byte_frames;
 351	u32	tx_1024byte_frames;
 352	u32	net_bytes;
 353	u32	rx_sof_overruns;
 354	u32	rx_mof_overruns;
 355	u32	rx_dma_overruns;
 356};
 357
 358struct gbenu_ss_regs {
 359	u32	id_ver;
 360	u32	synce_count;		/* NU */
 361	u32	synce_mux;		/* NU */
 362	u32	control;		/* 2U */
 363	u32	__rsvd_0[2];		/* 2U */
 364	u32	rgmii_status;		/* 2U */
 365	u32	ss_status;		/* 2U */
 366};
 367
 368struct gbenu_switch_regs {
 369	u32	id_ver;
 370	u32	control;
 371	u32	__rsvd_0[2];
 372	u32	emcontrol;
 373	u32	stat_port_en;
 374	u32	ptype;			/* NU */
 375	u32	soft_idle;
 376	u32	thru_rate;		/* NU */
 377	u32	gap_thresh;		/* NU */
 378	u32	tx_start_wds;		/* NU */
 379	u32	eee_prescale;		/* 2U */
 380	u32	tx_g_oflow_thresh_set;	/* NU */
 381	u32	tx_g_oflow_thresh_clr;	/* NU */
 382	u32	tx_g_buf_thresh_set_l;	/* NU */
 383	u32	tx_g_buf_thresh_set_h;	/* NU */
 384	u32	tx_g_buf_thresh_clr_l;	/* NU */
 385	u32	tx_g_buf_thresh_clr_h;	/* NU */
 386};
 387
 388struct gbenu_port_regs {
 389	u32	__rsvd_0;
 390	u32	control;
 391	u32	max_blks;		/* 2U */
 392	u32	mem_align1;
 393	u32	blk_cnt;
 394	u32	port_vlan;
 395	u32	tx_pri_map;		/* NU */
 396	u32	pri_ctl;		/* 2U */
 397	u32	rx_pri_map;
 398	u32	rx_maxlen;
 399	u32	tx_blks_pri;		/* NU */
 400	u32	__rsvd_1;
 401	u32	idle2lpi;		/* 2U */
 402	u32	lpi2idle;		/* 2U */
 403	u32	eee_status;		/* 2U */
 404	u32	__rsvd_2;
 405	u32	__rsvd_3[176];		/* NU: more to add */
 406	u32	__rsvd_4[2];
 407	u32	sa_lo;
 408	u32	sa_hi;
 409	u32	ts_ctl;
 410	u32	ts_seq_ltype;
 411	u32	ts_vlan;
 412	u32	ts_ctl_ltype2;
 413	u32	ts_ctl2;
 414};
 415
 416struct gbenu_host_port_regs {
 417	u32	__rsvd_0;
 418	u32	control;
 419	u32	flow_id_offset;		/* 2U */
 420	u32	__rsvd_1;
 421	u32	blk_cnt;
 422	u32	port_vlan;
 423	u32	tx_pri_map;		/* NU */
 424	u32	pri_ctl;
 425	u32	rx_pri_map;
 426	u32	rx_maxlen;
 427	u32	tx_blks_pri;		/* NU */
 428	u32	__rsvd_2;
 429	u32	idle2lpi;		/* 2U */
 430	u32	lpi2wake;		/* 2U */
 431	u32	eee_status;		/* 2U */
 432	u32	__rsvd_3;
 433	u32	__rsvd_4[184];		/* NU */
 434	u32	host_blks_pri;		/* NU */
 435};
 436
 437struct gbenu_emac_regs {
 438	u32	mac_control;
 439	u32	mac_status;
 440	u32	soft_reset;
 441	u32	boff_test;
 442	u32	rx_pause;
 443	u32	__rsvd_0[11];		/* NU */
 444	u32	tx_pause;
 445	u32	__rsvd_1[11];		/* NU */
 446	u32	em_control;
 447	u32	tx_gap;
 448};
 449
 450/* Some hw stat regs are applicable to slave port only.
 451 * This is handled by gbenu_et_stats struct.  Also some
 452 * are for SS version NU and some are for 2U.
 453 */
 454struct gbenu_hw_stats {
 455	u32	rx_good_frames;
 456	u32	rx_broadcast_frames;
 457	u32	rx_multicast_frames;
 458	u32	rx_pause_frames;		/* slave */
 459	u32	rx_crc_errors;
 460	u32	rx_align_code_errors;		/* slave */
 461	u32	rx_oversized_frames;
 462	u32	rx_jabber_frames;		/* slave */
 463	u32	rx_undersized_frames;
 464	u32	rx_fragments;			/* slave */
 465	u32	ale_drop;
 466	u32	ale_overrun_drop;
 467	u32	rx_bytes;
 468	u32	tx_good_frames;
 469	u32	tx_broadcast_frames;
 470	u32	tx_multicast_frames;
 471	u32	tx_pause_frames;		/* slave */
 472	u32	tx_deferred_frames;		/* slave */
 473	u32	tx_collision_frames;		/* slave */
 474	u32	tx_single_coll_frames;		/* slave */
 475	u32	tx_mult_coll_frames;		/* slave */
 476	u32	tx_excessive_collisions;	/* slave */
 477	u32	tx_late_collisions;		/* slave */
 478	u32	rx_ipg_error;			/* slave 10G only */
 479	u32	tx_carrier_sense_errors;	/* slave */
 480	u32	tx_bytes;
 481	u32	tx_64B_frames;
 482	u32	tx_65_to_127B_frames;
 483	u32	tx_128_to_255B_frames;
 484	u32	tx_256_to_511B_frames;
 485	u32	tx_512_to_1023B_frames;
 486	u32	tx_1024B_frames;
 487	u32	net_bytes;
 488	u32	rx_bottom_fifo_drop;
 489	u32	rx_port_mask_drop;
 490	u32	rx_top_fifo_drop;
 491	u32	ale_rate_limit_drop;
 492	u32	ale_vid_ingress_drop;
 493	u32	ale_da_eq_sa_drop;
 494	u32	__rsvd_0[3];
 495	u32	ale_unknown_ucast;
 496	u32	ale_unknown_ucast_bytes;
 497	u32	ale_unknown_mcast;
 498	u32	ale_unknown_mcast_bytes;
 499	u32	ale_unknown_bcast;
 500	u32	ale_unknown_bcast_bytes;
 501	u32	ale_pol_match;
 502	u32	ale_pol_match_red;		/* NU */
 503	u32	ale_pol_match_yellow;		/* NU */
 504	u32	__rsvd_1[44];
 505	u32	tx_mem_protect_err;
 506	/* following NU only */
 507	u32	tx_pri0;
 508	u32	tx_pri1;
 509	u32	tx_pri2;
 510	u32	tx_pri3;
 511	u32	tx_pri4;
 512	u32	tx_pri5;
 513	u32	tx_pri6;
 514	u32	tx_pri7;
 515	u32	tx_pri0_bcnt;
 516	u32	tx_pri1_bcnt;
 517	u32	tx_pri2_bcnt;
 518	u32	tx_pri3_bcnt;
 519	u32	tx_pri4_bcnt;
 520	u32	tx_pri5_bcnt;
 521	u32	tx_pri6_bcnt;
 522	u32	tx_pri7_bcnt;
 523	u32	tx_pri0_drop;
 524	u32	tx_pri1_drop;
 525	u32	tx_pri2_drop;
 526	u32	tx_pri3_drop;
 527	u32	tx_pri4_drop;
 528	u32	tx_pri5_drop;
 529	u32	tx_pri6_drop;
 530	u32	tx_pri7_drop;
 531	u32	tx_pri0_drop_bcnt;
 532	u32	tx_pri1_drop_bcnt;
 533	u32	tx_pri2_drop_bcnt;
 534	u32	tx_pri3_drop_bcnt;
 535	u32	tx_pri4_drop_bcnt;
 536	u32	tx_pri5_drop_bcnt;
 537	u32	tx_pri6_drop_bcnt;
 538	u32	tx_pri7_drop_bcnt;
 539};
 540
 541#define GBENU_HW_STATS_REG_MAP_SZ	0x200
 542
 543struct gbe_ss_regs {
 544	u32	id_ver;
 545	u32	synce_count;
 546	u32	synce_mux;
 547};
 548
 549struct gbe_ss_regs_ofs {
 550	u16	id_ver;
 551	u16	control;
 552};
 553
 554struct gbe_switch_regs {
 555	u32	id_ver;
 556	u32	control;
 557	u32	soft_reset;
 558	u32	stat_port_en;
 559	u32	ptype;
 560	u32	soft_idle;
 561	u32	thru_rate;
 562	u32	gap_thresh;
 563	u32	tx_start_wds;
 564	u32	flow_control;
 565};
 566
 567struct gbe_switch_regs_ofs {
 568	u16	id_ver;
 569	u16	control;
 570	u16	soft_reset;
 571	u16	emcontrol;
 572	u16	stat_port_en;
 573	u16	ptype;
 574	u16	flow_control;
 575};
 576
 577struct gbe_port_regs {
 578	u32	max_blks;
 579	u32	blk_cnt;
 580	u32	port_vlan;
 581	u32	tx_pri_map;
 582	u32	sa_lo;
 583	u32	sa_hi;
 584	u32	ts_ctl;
 585	u32	ts_seq_ltype;
 586	u32	ts_vlan;
 587	u32	ts_ctl_ltype2;
 588	u32	ts_ctl2;
 589};
 590
 591struct gbe_port_regs_ofs {
 592	u16	port_vlan;
 593	u16	tx_pri_map;
 594	u16	sa_lo;
 595	u16	sa_hi;
 596	u16	ts_ctl;
 597	u16	ts_seq_ltype;
 598	u16	ts_vlan;
 599	u16	ts_ctl_ltype2;
 600	u16	ts_ctl2;
 601	u16	rx_maxlen;	/* 2U, NU */
 602};
 603
 604struct gbe_host_port_regs {
 605	u32	src_id;
 606	u32	port_vlan;
 607	u32	rx_pri_map;
 608	u32	rx_maxlen;
 609};
 610
 611struct gbe_host_port_regs_ofs {
 612	u16	port_vlan;
 613	u16	tx_pri_map;
 614	u16	rx_maxlen;
 615};
 616
 617struct gbe_emac_regs {
 618	u32	id_ver;
 619	u32	mac_control;
 620	u32	mac_status;
 621	u32	soft_reset;
 622	u32	rx_maxlen;
 623	u32	__reserved_0;
 624	u32	rx_pause;
 625	u32	tx_pause;
 626	u32	__reserved_1;
 627	u32	rx_pri_map;
 628	u32	rsvd[6];
 629};
 630
 631struct gbe_emac_regs_ofs {
 632	u16	mac_control;
 633	u16	soft_reset;
 634	u16	rx_maxlen;
 635};
 636
 637struct gbe_hw_stats {
 638	u32	rx_good_frames;
 639	u32	rx_broadcast_frames;
 640	u32	rx_multicast_frames;
 641	u32	rx_pause_frames;
 642	u32	rx_crc_errors;
 643	u32	rx_align_code_errors;
 644	u32	rx_oversized_frames;
 645	u32	rx_jabber_frames;
 646	u32	rx_undersized_frames;
 647	u32	rx_fragments;
 648	u32	__pad_0[2];
 649	u32	rx_bytes;
 650	u32	tx_good_frames;
 651	u32	tx_broadcast_frames;
 652	u32	tx_multicast_frames;
 653	u32	tx_pause_frames;
 654	u32	tx_deferred_frames;
 655	u32	tx_collision_frames;
 656	u32	tx_single_coll_frames;
 657	u32	tx_mult_coll_frames;
 658	u32	tx_excessive_collisions;
 659	u32	tx_late_collisions;
 660	u32	tx_underrun;
 661	u32	tx_carrier_sense_errors;
 662	u32	tx_bytes;
 663	u32	tx_64byte_frames;
 664	u32	tx_65_to_127byte_frames;
 665	u32	tx_128_to_255byte_frames;
 666	u32	tx_256_to_511byte_frames;
 667	u32	tx_512_to_1023byte_frames;
 668	u32	tx_1024byte_frames;
 669	u32	net_bytes;
 670	u32	rx_sof_overruns;
 671	u32	rx_mof_overruns;
 672	u32	rx_dma_overruns;
 673};
 674
 675#define GBE_MAX_HW_STAT_MODS			9
 676#define GBE_HW_STATS_REG_MAP_SZ			0x100
 677
 678struct ts_ctl {
 679	int     uni;
 680	u8      dst_port_map;
 681	u8      maddr_map;
 682	u8      ts_mcast_type;
 683};
 684
 685struct gbe_slave {
 686	void __iomem			*port_regs;
 687	void __iomem			*emac_regs;
 688	struct gbe_port_regs_ofs	port_regs_ofs;
 689	struct gbe_emac_regs_ofs	emac_regs_ofs;
 690	int				slave_num; /* 0 based logical number */
 691	int				port_num;  /* actual port number */
 692	atomic_t			link_state;
 693	bool				open;
 694	struct phy_device		*phy;
 695	u32				link_interface;
 696	u32				mac_control;
 697	u8				phy_port_t;
 698	struct device_node		*phy_node;
 699	struct ts_ctl                   ts_ctl;
 700	struct list_head		slave_list;
 701};
 702
 703struct gbe_priv {
 704	struct device			*dev;
 705	struct netcp_device		*netcp_device;
 706	struct timer_list		timer;
 707	u32				num_slaves;
 708	u32				ale_entries;
 709	u32				ale_ports;
 710	bool				enable_ale;
 711	u8				max_num_slaves;
 712	u8				max_num_ports; /* max_num_slaves + 1 */
 713	u8				num_stats_mods;
 714	struct netcp_tx_pipe		tx_pipe;
 715
 716	int				host_port;
 717	u32				rx_packet_max;
 718	u32				ss_version;
 719	u32				stats_en_mask;
 720
 721	void __iomem			*ss_regs;
 722	void __iomem			*switch_regs;
 723	void __iomem			*host_port_regs;
 724	void __iomem			*ale_reg;
 725	void __iomem                    *cpts_reg;
 726	void __iomem			*sgmii_port_regs;
 727	void __iomem			*sgmii_port34_regs;
 728	void __iomem			*xgbe_serdes_regs;
 729	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
 730
 731	struct gbe_ss_regs_ofs		ss_regs_ofs;
 732	struct gbe_switch_regs_ofs	switch_regs_ofs;
 733	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
 734
 735	struct cpsw_ale			*ale;
 736	unsigned int			tx_queue_id;
 737	const char			*dma_chan_name;
 738
 739	struct list_head		gbe_intf_head;
 740	struct list_head		secondary_slaves;
 741	struct net_device		*dummy_ndev;
 742
 743	u64				*hw_stats;
 744	u32				*hw_stats_prev;
 745	const struct netcp_ethtool_stat *et_stats;
 746	int				num_et_stats;
 747	/*  Lock for updating the hwstats */
 748	spinlock_t			hw_stats_lock;
 749
 750	int                             cpts_registered;
 751	struct cpts                     *cpts;
 752};
 753
 754struct gbe_intf {
 755	struct net_device	*ndev;
 756	struct device		*dev;
 757	struct gbe_priv		*gbe_dev;
 758	struct netcp_tx_pipe	tx_pipe;
 759	struct gbe_slave	*slave;
 760	struct list_head	gbe_intf_list;
 761	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 762};
 763
 764static struct netcp_module gbe_module;
 765static struct netcp_module xgbe_module;
 766
 767/* Statistic management */
 768struct netcp_ethtool_stat {
 769	char desc[ETH_GSTRING_LEN];
 770	int type;
 771	u32 size;
 772	int offset;
 773};
 774
 775#define GBE_STATSA_INFO(field)						\
 776{									\
 777	"GBE_A:"#field, GBE_STATSA_MODULE,				\
 778	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 779	offsetof(struct gbe_hw_stats, field)				\
 780}
 781
 782#define GBE_STATSB_INFO(field)						\
 783{									\
 784	"GBE_B:"#field, GBE_STATSB_MODULE,				\
 785	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 786	offsetof(struct gbe_hw_stats, field)				\
 787}
 788
 789#define GBE_STATSC_INFO(field)						\
 790{									\
 791	"GBE_C:"#field, GBE_STATSC_MODULE,				\
 792	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 793	offsetof(struct gbe_hw_stats, field)				\
 794}
 795
 796#define GBE_STATSD_INFO(field)						\
 797{									\
 798	"GBE_D:"#field, GBE_STATSD_MODULE,				\
 799	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 800	offsetof(struct gbe_hw_stats, field)				\
 801}
 802
 803static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 804	/* GBE module A */
 805	GBE_STATSA_INFO(rx_good_frames),
 806	GBE_STATSA_INFO(rx_broadcast_frames),
 807	GBE_STATSA_INFO(rx_multicast_frames),
 808	GBE_STATSA_INFO(rx_pause_frames),
 809	GBE_STATSA_INFO(rx_crc_errors),
 810	GBE_STATSA_INFO(rx_align_code_errors),
 811	GBE_STATSA_INFO(rx_oversized_frames),
 812	GBE_STATSA_INFO(rx_jabber_frames),
 813	GBE_STATSA_INFO(rx_undersized_frames),
 814	GBE_STATSA_INFO(rx_fragments),
 815	GBE_STATSA_INFO(rx_bytes),
 816	GBE_STATSA_INFO(tx_good_frames),
 817	GBE_STATSA_INFO(tx_broadcast_frames),
 818	GBE_STATSA_INFO(tx_multicast_frames),
 819	GBE_STATSA_INFO(tx_pause_frames),
 820	GBE_STATSA_INFO(tx_deferred_frames),
 821	GBE_STATSA_INFO(tx_collision_frames),
 822	GBE_STATSA_INFO(tx_single_coll_frames),
 823	GBE_STATSA_INFO(tx_mult_coll_frames),
 824	GBE_STATSA_INFO(tx_excessive_collisions),
 825	GBE_STATSA_INFO(tx_late_collisions),
 826	GBE_STATSA_INFO(tx_underrun),
 827	GBE_STATSA_INFO(tx_carrier_sense_errors),
 828	GBE_STATSA_INFO(tx_bytes),
 829	GBE_STATSA_INFO(tx_64byte_frames),
 830	GBE_STATSA_INFO(tx_65_to_127byte_frames),
 831	GBE_STATSA_INFO(tx_128_to_255byte_frames),
 832	GBE_STATSA_INFO(tx_256_to_511byte_frames),
 833	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
 834	GBE_STATSA_INFO(tx_1024byte_frames),
 835	GBE_STATSA_INFO(net_bytes),
 836	GBE_STATSA_INFO(rx_sof_overruns),
 837	GBE_STATSA_INFO(rx_mof_overruns),
 838	GBE_STATSA_INFO(rx_dma_overruns),
 839	/* GBE module B */
 840	GBE_STATSB_INFO(rx_good_frames),
 841	GBE_STATSB_INFO(rx_broadcast_frames),
 842	GBE_STATSB_INFO(rx_multicast_frames),
 843	GBE_STATSB_INFO(rx_pause_frames),
 844	GBE_STATSB_INFO(rx_crc_errors),
 845	GBE_STATSB_INFO(rx_align_code_errors),
 846	GBE_STATSB_INFO(rx_oversized_frames),
 847	GBE_STATSB_INFO(rx_jabber_frames),
 848	GBE_STATSB_INFO(rx_undersized_frames),
 849	GBE_STATSB_INFO(rx_fragments),
 850	GBE_STATSB_INFO(rx_bytes),
 851	GBE_STATSB_INFO(tx_good_frames),
 852	GBE_STATSB_INFO(tx_broadcast_frames),
 853	GBE_STATSB_INFO(tx_multicast_frames),
 854	GBE_STATSB_INFO(tx_pause_frames),
 855	GBE_STATSB_INFO(tx_deferred_frames),
 856	GBE_STATSB_INFO(tx_collision_frames),
 857	GBE_STATSB_INFO(tx_single_coll_frames),
 858	GBE_STATSB_INFO(tx_mult_coll_frames),
 859	GBE_STATSB_INFO(tx_excessive_collisions),
 860	GBE_STATSB_INFO(tx_late_collisions),
 861	GBE_STATSB_INFO(tx_underrun),
 862	GBE_STATSB_INFO(tx_carrier_sense_errors),
 863	GBE_STATSB_INFO(tx_bytes),
 864	GBE_STATSB_INFO(tx_64byte_frames),
 865	GBE_STATSB_INFO(tx_65_to_127byte_frames),
 866	GBE_STATSB_INFO(tx_128_to_255byte_frames),
 867	GBE_STATSB_INFO(tx_256_to_511byte_frames),
 868	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
 869	GBE_STATSB_INFO(tx_1024byte_frames),
 870	GBE_STATSB_INFO(net_bytes),
 871	GBE_STATSB_INFO(rx_sof_overruns),
 872	GBE_STATSB_INFO(rx_mof_overruns),
 873	GBE_STATSB_INFO(rx_dma_overruns),
 874	/* GBE module C */
 875	GBE_STATSC_INFO(rx_good_frames),
 876	GBE_STATSC_INFO(rx_broadcast_frames),
 877	GBE_STATSC_INFO(rx_multicast_frames),
 878	GBE_STATSC_INFO(rx_pause_frames),
 879	GBE_STATSC_INFO(rx_crc_errors),
 880	GBE_STATSC_INFO(rx_align_code_errors),
 881	GBE_STATSC_INFO(rx_oversized_frames),
 882	GBE_STATSC_INFO(rx_jabber_frames),
 883	GBE_STATSC_INFO(rx_undersized_frames),
 884	GBE_STATSC_INFO(rx_fragments),
 885	GBE_STATSC_INFO(rx_bytes),
 886	GBE_STATSC_INFO(tx_good_frames),
 887	GBE_STATSC_INFO(tx_broadcast_frames),
 888	GBE_STATSC_INFO(tx_multicast_frames),
 889	GBE_STATSC_INFO(tx_pause_frames),
 890	GBE_STATSC_INFO(tx_deferred_frames),
 891	GBE_STATSC_INFO(tx_collision_frames),
 892	GBE_STATSC_INFO(tx_single_coll_frames),
 893	GBE_STATSC_INFO(tx_mult_coll_frames),
 894	GBE_STATSC_INFO(tx_excessive_collisions),
 895	GBE_STATSC_INFO(tx_late_collisions),
 896	GBE_STATSC_INFO(tx_underrun),
 897	GBE_STATSC_INFO(tx_carrier_sense_errors),
 898	GBE_STATSC_INFO(tx_bytes),
 899	GBE_STATSC_INFO(tx_64byte_frames),
 900	GBE_STATSC_INFO(tx_65_to_127byte_frames),
 901	GBE_STATSC_INFO(tx_128_to_255byte_frames),
 902	GBE_STATSC_INFO(tx_256_to_511byte_frames),
 903	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
 904	GBE_STATSC_INFO(tx_1024byte_frames),
 905	GBE_STATSC_INFO(net_bytes),
 906	GBE_STATSC_INFO(rx_sof_overruns),
 907	GBE_STATSC_INFO(rx_mof_overruns),
 908	GBE_STATSC_INFO(rx_dma_overruns),
 909	/* GBE module D */
 910	GBE_STATSD_INFO(rx_good_frames),
 911	GBE_STATSD_INFO(rx_broadcast_frames),
 912	GBE_STATSD_INFO(rx_multicast_frames),
 913	GBE_STATSD_INFO(rx_pause_frames),
 914	GBE_STATSD_INFO(rx_crc_errors),
 915	GBE_STATSD_INFO(rx_align_code_errors),
 916	GBE_STATSD_INFO(rx_oversized_frames),
 917	GBE_STATSD_INFO(rx_jabber_frames),
 918	GBE_STATSD_INFO(rx_undersized_frames),
 919	GBE_STATSD_INFO(rx_fragments),
 920	GBE_STATSD_INFO(rx_bytes),
 921	GBE_STATSD_INFO(tx_good_frames),
 922	GBE_STATSD_INFO(tx_broadcast_frames),
 923	GBE_STATSD_INFO(tx_multicast_frames),
 924	GBE_STATSD_INFO(tx_pause_frames),
 925	GBE_STATSD_INFO(tx_deferred_frames),
 926	GBE_STATSD_INFO(tx_collision_frames),
 927	GBE_STATSD_INFO(tx_single_coll_frames),
 928	GBE_STATSD_INFO(tx_mult_coll_frames),
 929	GBE_STATSD_INFO(tx_excessive_collisions),
 930	GBE_STATSD_INFO(tx_late_collisions),
 931	GBE_STATSD_INFO(tx_underrun),
 932	GBE_STATSD_INFO(tx_carrier_sense_errors),
 933	GBE_STATSD_INFO(tx_bytes),
 934	GBE_STATSD_INFO(tx_64byte_frames),
 935	GBE_STATSD_INFO(tx_65_to_127byte_frames),
 936	GBE_STATSD_INFO(tx_128_to_255byte_frames),
 937	GBE_STATSD_INFO(tx_256_to_511byte_frames),
 938	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
 939	GBE_STATSD_INFO(tx_1024byte_frames),
 940	GBE_STATSD_INFO(net_bytes),
 941	GBE_STATSD_INFO(rx_sof_overruns),
 942	GBE_STATSD_INFO(rx_mof_overruns),
 943	GBE_STATSD_INFO(rx_dma_overruns),
 944};
 945
 946/* This is the size of entries in GBENU_STATS_HOST */
 947#define GBENU_ET_STATS_HOST_SIZE	52
 948
 949#define GBENU_STATS_HOST(field)					\
 950{								\
 951	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
 952	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 953	offsetof(struct gbenu_hw_stats, field)			\
 954}
 955
 956/* This is the size of entries in GBENU_STATS_PORT */
 957#define GBENU_ET_STATS_PORT_SIZE	65
 958
 959#define GBENU_STATS_P1(field)					\
 960{								\
 961	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
 962	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 963	offsetof(struct gbenu_hw_stats, field)			\
 964}
 965
 966#define GBENU_STATS_P2(field)					\
 967{								\
 968	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
 969	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 970	offsetof(struct gbenu_hw_stats, field)			\
 971}
 972
 973#define GBENU_STATS_P3(field)					\
 974{								\
 975	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
 976	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 977	offsetof(struct gbenu_hw_stats, field)			\
 978}
 979
 980#define GBENU_STATS_P4(field)					\
 981{								\
 982	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
 983	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 984	offsetof(struct gbenu_hw_stats, field)			\
 985}
 986
 987#define GBENU_STATS_P5(field)					\
 988{								\
 989	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
 990	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 991	offsetof(struct gbenu_hw_stats, field)			\
 992}
 993
 994#define GBENU_STATS_P6(field)					\
 995{								\
 996	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
 997	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 998	offsetof(struct gbenu_hw_stats, field)			\
 999}
1000
1001#define GBENU_STATS_P7(field)					\
1002{								\
1003	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1004	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1005	offsetof(struct gbenu_hw_stats, field)			\
1006}
1007
1008#define GBENU_STATS_P8(field)					\
1009{								\
1010	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1011	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1012	offsetof(struct gbenu_hw_stats, field)			\
1013}
1014
1015static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1016	/* GBENU Host Module */
1017	GBENU_STATS_HOST(rx_good_frames),
1018	GBENU_STATS_HOST(rx_broadcast_frames),
1019	GBENU_STATS_HOST(rx_multicast_frames),
1020	GBENU_STATS_HOST(rx_crc_errors),
1021	GBENU_STATS_HOST(rx_oversized_frames),
1022	GBENU_STATS_HOST(rx_undersized_frames),
1023	GBENU_STATS_HOST(ale_drop),
1024	GBENU_STATS_HOST(ale_overrun_drop),
1025	GBENU_STATS_HOST(rx_bytes),
1026	GBENU_STATS_HOST(tx_good_frames),
1027	GBENU_STATS_HOST(tx_broadcast_frames),
1028	GBENU_STATS_HOST(tx_multicast_frames),
1029	GBENU_STATS_HOST(tx_bytes),
1030	GBENU_STATS_HOST(tx_64B_frames),
1031	GBENU_STATS_HOST(tx_65_to_127B_frames),
1032	GBENU_STATS_HOST(tx_128_to_255B_frames),
1033	GBENU_STATS_HOST(tx_256_to_511B_frames),
1034	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1035	GBENU_STATS_HOST(tx_1024B_frames),
1036	GBENU_STATS_HOST(net_bytes),
1037	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1038	GBENU_STATS_HOST(rx_port_mask_drop),
1039	GBENU_STATS_HOST(rx_top_fifo_drop),
1040	GBENU_STATS_HOST(ale_rate_limit_drop),
1041	GBENU_STATS_HOST(ale_vid_ingress_drop),
1042	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1043	GBENU_STATS_HOST(ale_unknown_ucast),
1044	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1045	GBENU_STATS_HOST(ale_unknown_mcast),
1046	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1047	GBENU_STATS_HOST(ale_unknown_bcast),
1048	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1049	GBENU_STATS_HOST(ale_pol_match),
1050	GBENU_STATS_HOST(ale_pol_match_red),
1051	GBENU_STATS_HOST(ale_pol_match_yellow),
1052	GBENU_STATS_HOST(tx_mem_protect_err),
1053	GBENU_STATS_HOST(tx_pri0_drop),
1054	GBENU_STATS_HOST(tx_pri1_drop),
1055	GBENU_STATS_HOST(tx_pri2_drop),
1056	GBENU_STATS_HOST(tx_pri3_drop),
1057	GBENU_STATS_HOST(tx_pri4_drop),
1058	GBENU_STATS_HOST(tx_pri5_drop),
1059	GBENU_STATS_HOST(tx_pri6_drop),
1060	GBENU_STATS_HOST(tx_pri7_drop),
1061	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1062	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1063	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1064	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1065	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1066	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1067	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1068	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1069	/* GBENU Module 1 */
1070	GBENU_STATS_P1(rx_good_frames),
1071	GBENU_STATS_P1(rx_broadcast_frames),
1072	GBENU_STATS_P1(rx_multicast_frames),
1073	GBENU_STATS_P1(rx_pause_frames),
1074	GBENU_STATS_P1(rx_crc_errors),
1075	GBENU_STATS_P1(rx_align_code_errors),
1076	GBENU_STATS_P1(rx_oversized_frames),
1077	GBENU_STATS_P1(rx_jabber_frames),
1078	GBENU_STATS_P1(rx_undersized_frames),
1079	GBENU_STATS_P1(rx_fragments),
1080	GBENU_STATS_P1(ale_drop),
1081	GBENU_STATS_P1(ale_overrun_drop),
1082	GBENU_STATS_P1(rx_bytes),
1083	GBENU_STATS_P1(tx_good_frames),
1084	GBENU_STATS_P1(tx_broadcast_frames),
1085	GBENU_STATS_P1(tx_multicast_frames),
1086	GBENU_STATS_P1(tx_pause_frames),
1087	GBENU_STATS_P1(tx_deferred_frames),
1088	GBENU_STATS_P1(tx_collision_frames),
1089	GBENU_STATS_P1(tx_single_coll_frames),
1090	GBENU_STATS_P1(tx_mult_coll_frames),
1091	GBENU_STATS_P1(tx_excessive_collisions),
1092	GBENU_STATS_P1(tx_late_collisions),
1093	GBENU_STATS_P1(rx_ipg_error),
1094	GBENU_STATS_P1(tx_carrier_sense_errors),
1095	GBENU_STATS_P1(tx_bytes),
1096	GBENU_STATS_P1(tx_64B_frames),
1097	GBENU_STATS_P1(tx_65_to_127B_frames),
1098	GBENU_STATS_P1(tx_128_to_255B_frames),
1099	GBENU_STATS_P1(tx_256_to_511B_frames),
1100	GBENU_STATS_P1(tx_512_to_1023B_frames),
1101	GBENU_STATS_P1(tx_1024B_frames),
1102	GBENU_STATS_P1(net_bytes),
1103	GBENU_STATS_P1(rx_bottom_fifo_drop),
1104	GBENU_STATS_P1(rx_port_mask_drop),
1105	GBENU_STATS_P1(rx_top_fifo_drop),
1106	GBENU_STATS_P1(ale_rate_limit_drop),
1107	GBENU_STATS_P1(ale_vid_ingress_drop),
1108	GBENU_STATS_P1(ale_da_eq_sa_drop),
1109	GBENU_STATS_P1(ale_unknown_ucast),
1110	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1111	GBENU_STATS_P1(ale_unknown_mcast),
1112	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1113	GBENU_STATS_P1(ale_unknown_bcast),
1114	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1115	GBENU_STATS_P1(ale_pol_match),
1116	GBENU_STATS_P1(ale_pol_match_red),
1117	GBENU_STATS_P1(ale_pol_match_yellow),
1118	GBENU_STATS_P1(tx_mem_protect_err),
1119	GBENU_STATS_P1(tx_pri0_drop),
1120	GBENU_STATS_P1(tx_pri1_drop),
1121	GBENU_STATS_P1(tx_pri2_drop),
1122	GBENU_STATS_P1(tx_pri3_drop),
1123	GBENU_STATS_P1(tx_pri4_drop),
1124	GBENU_STATS_P1(tx_pri5_drop),
1125	GBENU_STATS_P1(tx_pri6_drop),
1126	GBENU_STATS_P1(tx_pri7_drop),
1127	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1128	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1129	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1130	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1131	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1132	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1133	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1134	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1135	/* GBENU Module 2 */
1136	GBENU_STATS_P2(rx_good_frames),
1137	GBENU_STATS_P2(rx_broadcast_frames),
1138	GBENU_STATS_P2(rx_multicast_frames),
1139	GBENU_STATS_P2(rx_pause_frames),
1140	GBENU_STATS_P2(rx_crc_errors),
1141	GBENU_STATS_P2(rx_align_code_errors),
1142	GBENU_STATS_P2(rx_oversized_frames),
1143	GBENU_STATS_P2(rx_jabber_frames),
1144	GBENU_STATS_P2(rx_undersized_frames),
1145	GBENU_STATS_P2(rx_fragments),
1146	GBENU_STATS_P2(ale_drop),
1147	GBENU_STATS_P2(ale_overrun_drop),
1148	GBENU_STATS_P2(rx_bytes),
1149	GBENU_STATS_P2(tx_good_frames),
1150	GBENU_STATS_P2(tx_broadcast_frames),
1151	GBENU_STATS_P2(tx_multicast_frames),
1152	GBENU_STATS_P2(tx_pause_frames),
1153	GBENU_STATS_P2(tx_deferred_frames),
1154	GBENU_STATS_P2(tx_collision_frames),
1155	GBENU_STATS_P2(tx_single_coll_frames),
1156	GBENU_STATS_P2(tx_mult_coll_frames),
1157	GBENU_STATS_P2(tx_excessive_collisions),
1158	GBENU_STATS_P2(tx_late_collisions),
1159	GBENU_STATS_P2(rx_ipg_error),
1160	GBENU_STATS_P2(tx_carrier_sense_errors),
1161	GBENU_STATS_P2(tx_bytes),
1162	GBENU_STATS_P2(tx_64B_frames),
1163	GBENU_STATS_P2(tx_65_to_127B_frames),
1164	GBENU_STATS_P2(tx_128_to_255B_frames),
1165	GBENU_STATS_P2(tx_256_to_511B_frames),
1166	GBENU_STATS_P2(tx_512_to_1023B_frames),
1167	GBENU_STATS_P2(tx_1024B_frames),
1168	GBENU_STATS_P2(net_bytes),
1169	GBENU_STATS_P2(rx_bottom_fifo_drop),
1170	GBENU_STATS_P2(rx_port_mask_drop),
1171	GBENU_STATS_P2(rx_top_fifo_drop),
1172	GBENU_STATS_P2(ale_rate_limit_drop),
1173	GBENU_STATS_P2(ale_vid_ingress_drop),
1174	GBENU_STATS_P2(ale_da_eq_sa_drop),
1175	GBENU_STATS_P2(ale_unknown_ucast),
1176	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1177	GBENU_STATS_P2(ale_unknown_mcast),
1178	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1179	GBENU_STATS_P2(ale_unknown_bcast),
1180	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1181	GBENU_STATS_P2(ale_pol_match),
1182	GBENU_STATS_P2(ale_pol_match_red),
1183	GBENU_STATS_P2(ale_pol_match_yellow),
1184	GBENU_STATS_P2(tx_mem_protect_err),
1185	GBENU_STATS_P2(tx_pri0_drop),
1186	GBENU_STATS_P2(tx_pri1_drop),
1187	GBENU_STATS_P2(tx_pri2_drop),
1188	GBENU_STATS_P2(tx_pri3_drop),
1189	GBENU_STATS_P2(tx_pri4_drop),
1190	GBENU_STATS_P2(tx_pri5_drop),
1191	GBENU_STATS_P2(tx_pri6_drop),
1192	GBENU_STATS_P2(tx_pri7_drop),
1193	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1194	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1195	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1196	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1197	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1198	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1199	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1200	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1201	/* GBENU Module 3 */
1202	GBENU_STATS_P3(rx_good_frames),
1203	GBENU_STATS_P3(rx_broadcast_frames),
1204	GBENU_STATS_P3(rx_multicast_frames),
1205	GBENU_STATS_P3(rx_pause_frames),
1206	GBENU_STATS_P3(rx_crc_errors),
1207	GBENU_STATS_P3(rx_align_code_errors),
1208	GBENU_STATS_P3(rx_oversized_frames),
1209	GBENU_STATS_P3(rx_jabber_frames),
1210	GBENU_STATS_P3(rx_undersized_frames),
1211	GBENU_STATS_P3(rx_fragments),
1212	GBENU_STATS_P3(ale_drop),
1213	GBENU_STATS_P3(ale_overrun_drop),
1214	GBENU_STATS_P3(rx_bytes),
1215	GBENU_STATS_P3(tx_good_frames),
1216	GBENU_STATS_P3(tx_broadcast_frames),
1217	GBENU_STATS_P3(tx_multicast_frames),
1218	GBENU_STATS_P3(tx_pause_frames),
1219	GBENU_STATS_P3(tx_deferred_frames),
1220	GBENU_STATS_P3(tx_collision_frames),
1221	GBENU_STATS_P3(tx_single_coll_frames),
1222	GBENU_STATS_P3(tx_mult_coll_frames),
1223	GBENU_STATS_P3(tx_excessive_collisions),
1224	GBENU_STATS_P3(tx_late_collisions),
1225	GBENU_STATS_P3(rx_ipg_error),
1226	GBENU_STATS_P3(tx_carrier_sense_errors),
1227	GBENU_STATS_P3(tx_bytes),
1228	GBENU_STATS_P3(tx_64B_frames),
1229	GBENU_STATS_P3(tx_65_to_127B_frames),
1230	GBENU_STATS_P3(tx_128_to_255B_frames),
1231	GBENU_STATS_P3(tx_256_to_511B_frames),
1232	GBENU_STATS_P3(tx_512_to_1023B_frames),
1233	GBENU_STATS_P3(tx_1024B_frames),
1234	GBENU_STATS_P3(net_bytes),
1235	GBENU_STATS_P3(rx_bottom_fifo_drop),
1236	GBENU_STATS_P3(rx_port_mask_drop),
1237	GBENU_STATS_P3(rx_top_fifo_drop),
1238	GBENU_STATS_P3(ale_rate_limit_drop),
1239	GBENU_STATS_P3(ale_vid_ingress_drop),
1240	GBENU_STATS_P3(ale_da_eq_sa_drop),
1241	GBENU_STATS_P3(ale_unknown_ucast),
1242	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1243	GBENU_STATS_P3(ale_unknown_mcast),
1244	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1245	GBENU_STATS_P3(ale_unknown_bcast),
1246	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1247	GBENU_STATS_P3(ale_pol_match),
1248	GBENU_STATS_P3(ale_pol_match_red),
1249	GBENU_STATS_P3(ale_pol_match_yellow),
1250	GBENU_STATS_P3(tx_mem_protect_err),
1251	GBENU_STATS_P3(tx_pri0_drop),
1252	GBENU_STATS_P3(tx_pri1_drop),
1253	GBENU_STATS_P3(tx_pri2_drop),
1254	GBENU_STATS_P3(tx_pri3_drop),
1255	GBENU_STATS_P3(tx_pri4_drop),
1256	GBENU_STATS_P3(tx_pri5_drop),
1257	GBENU_STATS_P3(tx_pri6_drop),
1258	GBENU_STATS_P3(tx_pri7_drop),
1259	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1260	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1261	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1262	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1263	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1264	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1265	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1266	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1267	/* GBENU Module 4 */
1268	GBENU_STATS_P4(rx_good_frames),
1269	GBENU_STATS_P4(rx_broadcast_frames),
1270	GBENU_STATS_P4(rx_multicast_frames),
1271	GBENU_STATS_P4(rx_pause_frames),
1272	GBENU_STATS_P4(rx_crc_errors),
1273	GBENU_STATS_P4(rx_align_code_errors),
1274	GBENU_STATS_P4(rx_oversized_frames),
1275	GBENU_STATS_P4(rx_jabber_frames),
1276	GBENU_STATS_P4(rx_undersized_frames),
1277	GBENU_STATS_P4(rx_fragments),
1278	GBENU_STATS_P4(ale_drop),
1279	GBENU_STATS_P4(ale_overrun_drop),
1280	GBENU_STATS_P4(rx_bytes),
1281	GBENU_STATS_P4(tx_good_frames),
1282	GBENU_STATS_P4(tx_broadcast_frames),
1283	GBENU_STATS_P4(tx_multicast_frames),
1284	GBENU_STATS_P4(tx_pause_frames),
1285	GBENU_STATS_P4(tx_deferred_frames),
1286	GBENU_STATS_P4(tx_collision_frames),
1287	GBENU_STATS_P4(tx_single_coll_frames),
1288	GBENU_STATS_P4(tx_mult_coll_frames),
1289	GBENU_STATS_P4(tx_excessive_collisions),
1290	GBENU_STATS_P4(tx_late_collisions),
1291	GBENU_STATS_P4(rx_ipg_error),
1292	GBENU_STATS_P4(tx_carrier_sense_errors),
1293	GBENU_STATS_P4(tx_bytes),
1294	GBENU_STATS_P4(tx_64B_frames),
1295	GBENU_STATS_P4(tx_65_to_127B_frames),
1296	GBENU_STATS_P4(tx_128_to_255B_frames),
1297	GBENU_STATS_P4(tx_256_to_511B_frames),
1298	GBENU_STATS_P4(tx_512_to_1023B_frames),
1299	GBENU_STATS_P4(tx_1024B_frames),
1300	GBENU_STATS_P4(net_bytes),
1301	GBENU_STATS_P4(rx_bottom_fifo_drop),
1302	GBENU_STATS_P4(rx_port_mask_drop),
1303	GBENU_STATS_P4(rx_top_fifo_drop),
1304	GBENU_STATS_P4(ale_rate_limit_drop),
1305	GBENU_STATS_P4(ale_vid_ingress_drop),
1306	GBENU_STATS_P4(ale_da_eq_sa_drop),
1307	GBENU_STATS_P4(ale_unknown_ucast),
1308	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1309	GBENU_STATS_P4(ale_unknown_mcast),
1310	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1311	GBENU_STATS_P4(ale_unknown_bcast),
1312	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1313	GBENU_STATS_P4(ale_pol_match),
1314	GBENU_STATS_P4(ale_pol_match_red),
1315	GBENU_STATS_P4(ale_pol_match_yellow),
1316	GBENU_STATS_P4(tx_mem_protect_err),
1317	GBENU_STATS_P4(tx_pri0_drop),
1318	GBENU_STATS_P4(tx_pri1_drop),
1319	GBENU_STATS_P4(tx_pri2_drop),
1320	GBENU_STATS_P4(tx_pri3_drop),
1321	GBENU_STATS_P4(tx_pri4_drop),
1322	GBENU_STATS_P4(tx_pri5_drop),
1323	GBENU_STATS_P4(tx_pri6_drop),
1324	GBENU_STATS_P4(tx_pri7_drop),
1325	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1326	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1327	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1328	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1329	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1330	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1331	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1332	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1333	/* GBENU Module 5 */
1334	GBENU_STATS_P5(rx_good_frames),
1335	GBENU_STATS_P5(rx_broadcast_frames),
1336	GBENU_STATS_P5(rx_multicast_frames),
1337	GBENU_STATS_P5(rx_pause_frames),
1338	GBENU_STATS_P5(rx_crc_errors),
1339	GBENU_STATS_P5(rx_align_code_errors),
1340	GBENU_STATS_P5(rx_oversized_frames),
1341	GBENU_STATS_P5(rx_jabber_frames),
1342	GBENU_STATS_P5(rx_undersized_frames),
1343	GBENU_STATS_P5(rx_fragments),
1344	GBENU_STATS_P5(ale_drop),
1345	GBENU_STATS_P5(ale_overrun_drop),
1346	GBENU_STATS_P5(rx_bytes),
1347	GBENU_STATS_P5(tx_good_frames),
1348	GBENU_STATS_P5(tx_broadcast_frames),
1349	GBENU_STATS_P5(tx_multicast_frames),
1350	GBENU_STATS_P5(tx_pause_frames),
1351	GBENU_STATS_P5(tx_deferred_frames),
1352	GBENU_STATS_P5(tx_collision_frames),
1353	GBENU_STATS_P5(tx_single_coll_frames),
1354	GBENU_STATS_P5(tx_mult_coll_frames),
1355	GBENU_STATS_P5(tx_excessive_collisions),
1356	GBENU_STATS_P5(tx_late_collisions),
1357	GBENU_STATS_P5(rx_ipg_error),
1358	GBENU_STATS_P5(tx_carrier_sense_errors),
1359	GBENU_STATS_P5(tx_bytes),
1360	GBENU_STATS_P5(tx_64B_frames),
1361	GBENU_STATS_P5(tx_65_to_127B_frames),
1362	GBENU_STATS_P5(tx_128_to_255B_frames),
1363	GBENU_STATS_P5(tx_256_to_511B_frames),
1364	GBENU_STATS_P5(tx_512_to_1023B_frames),
1365	GBENU_STATS_P5(tx_1024B_frames),
1366	GBENU_STATS_P5(net_bytes),
1367	GBENU_STATS_P5(rx_bottom_fifo_drop),
1368	GBENU_STATS_P5(rx_port_mask_drop),
1369	GBENU_STATS_P5(rx_top_fifo_drop),
1370	GBENU_STATS_P5(ale_rate_limit_drop),
1371	GBENU_STATS_P5(ale_vid_ingress_drop),
1372	GBENU_STATS_P5(ale_da_eq_sa_drop),
1373	GBENU_STATS_P5(ale_unknown_ucast),
1374	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1375	GBENU_STATS_P5(ale_unknown_mcast),
1376	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1377	GBENU_STATS_P5(ale_unknown_bcast),
1378	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1379	GBENU_STATS_P5(ale_pol_match),
1380	GBENU_STATS_P5(ale_pol_match_red),
1381	GBENU_STATS_P5(ale_pol_match_yellow),
1382	GBENU_STATS_P5(tx_mem_protect_err),
1383	GBENU_STATS_P5(tx_pri0_drop),
1384	GBENU_STATS_P5(tx_pri1_drop),
1385	GBENU_STATS_P5(tx_pri2_drop),
1386	GBENU_STATS_P5(tx_pri3_drop),
1387	GBENU_STATS_P5(tx_pri4_drop),
1388	GBENU_STATS_P5(tx_pri5_drop),
1389	GBENU_STATS_P5(tx_pri6_drop),
1390	GBENU_STATS_P5(tx_pri7_drop),
1391	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1392	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1393	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1394	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1395	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1396	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1397	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1398	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1399	/* GBENU Module 6 */
1400	GBENU_STATS_P6(rx_good_frames),
1401	GBENU_STATS_P6(rx_broadcast_frames),
1402	GBENU_STATS_P6(rx_multicast_frames),
1403	GBENU_STATS_P6(rx_pause_frames),
1404	GBENU_STATS_P6(rx_crc_errors),
1405	GBENU_STATS_P6(rx_align_code_errors),
1406	GBENU_STATS_P6(rx_oversized_frames),
1407	GBENU_STATS_P6(rx_jabber_frames),
1408	GBENU_STATS_P6(rx_undersized_frames),
1409	GBENU_STATS_P6(rx_fragments),
1410	GBENU_STATS_P6(ale_drop),
1411	GBENU_STATS_P6(ale_overrun_drop),
1412	GBENU_STATS_P6(rx_bytes),
1413	GBENU_STATS_P6(tx_good_frames),
1414	GBENU_STATS_P6(tx_broadcast_frames),
1415	GBENU_STATS_P6(tx_multicast_frames),
1416	GBENU_STATS_P6(tx_pause_frames),
1417	GBENU_STATS_P6(tx_deferred_frames),
1418	GBENU_STATS_P6(tx_collision_frames),
1419	GBENU_STATS_P6(tx_single_coll_frames),
1420	GBENU_STATS_P6(tx_mult_coll_frames),
1421	GBENU_STATS_P6(tx_excessive_collisions),
1422	GBENU_STATS_P6(tx_late_collisions),
1423	GBENU_STATS_P6(rx_ipg_error),
1424	GBENU_STATS_P6(tx_carrier_sense_errors),
1425	GBENU_STATS_P6(tx_bytes),
1426	GBENU_STATS_P6(tx_64B_frames),
1427	GBENU_STATS_P6(tx_65_to_127B_frames),
1428	GBENU_STATS_P6(tx_128_to_255B_frames),
1429	GBENU_STATS_P6(tx_256_to_511B_frames),
1430	GBENU_STATS_P6(tx_512_to_1023B_frames),
1431	GBENU_STATS_P6(tx_1024B_frames),
1432	GBENU_STATS_P6(net_bytes),
1433	GBENU_STATS_P6(rx_bottom_fifo_drop),
1434	GBENU_STATS_P6(rx_port_mask_drop),
1435	GBENU_STATS_P6(rx_top_fifo_drop),
1436	GBENU_STATS_P6(ale_rate_limit_drop),
1437	GBENU_STATS_P6(ale_vid_ingress_drop),
1438	GBENU_STATS_P6(ale_da_eq_sa_drop),
1439	GBENU_STATS_P6(ale_unknown_ucast),
1440	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1441	GBENU_STATS_P6(ale_unknown_mcast),
1442	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1443	GBENU_STATS_P6(ale_unknown_bcast),
1444	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1445	GBENU_STATS_P6(ale_pol_match),
1446	GBENU_STATS_P6(ale_pol_match_red),
1447	GBENU_STATS_P6(ale_pol_match_yellow),
1448	GBENU_STATS_P6(tx_mem_protect_err),
1449	GBENU_STATS_P6(tx_pri0_drop),
1450	GBENU_STATS_P6(tx_pri1_drop),
1451	GBENU_STATS_P6(tx_pri2_drop),
1452	GBENU_STATS_P6(tx_pri3_drop),
1453	GBENU_STATS_P6(tx_pri4_drop),
1454	GBENU_STATS_P6(tx_pri5_drop),
1455	GBENU_STATS_P6(tx_pri6_drop),
1456	GBENU_STATS_P6(tx_pri7_drop),
1457	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1458	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1459	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1460	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1461	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1462	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1463	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1464	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1465	/* GBENU Module 7 */
1466	GBENU_STATS_P7(rx_good_frames),
1467	GBENU_STATS_P7(rx_broadcast_frames),
1468	GBENU_STATS_P7(rx_multicast_frames),
1469	GBENU_STATS_P7(rx_pause_frames),
1470	GBENU_STATS_P7(rx_crc_errors),
1471	GBENU_STATS_P7(rx_align_code_errors),
1472	GBENU_STATS_P7(rx_oversized_frames),
1473	GBENU_STATS_P7(rx_jabber_frames),
1474	GBENU_STATS_P7(rx_undersized_frames),
1475	GBENU_STATS_P7(rx_fragments),
1476	GBENU_STATS_P7(ale_drop),
1477	GBENU_STATS_P7(ale_overrun_drop),
1478	GBENU_STATS_P7(rx_bytes),
1479	GBENU_STATS_P7(tx_good_frames),
1480	GBENU_STATS_P7(tx_broadcast_frames),
1481	GBENU_STATS_P7(tx_multicast_frames),
1482	GBENU_STATS_P7(tx_pause_frames),
1483	GBENU_STATS_P7(tx_deferred_frames),
1484	GBENU_STATS_P7(tx_collision_frames),
1485	GBENU_STATS_P7(tx_single_coll_frames),
1486	GBENU_STATS_P7(tx_mult_coll_frames),
1487	GBENU_STATS_P7(tx_excessive_collisions),
1488	GBENU_STATS_P7(tx_late_collisions),
1489	GBENU_STATS_P7(rx_ipg_error),
1490	GBENU_STATS_P7(tx_carrier_sense_errors),
1491	GBENU_STATS_P7(tx_bytes),
1492	GBENU_STATS_P7(tx_64B_frames),
1493	GBENU_STATS_P7(tx_65_to_127B_frames),
1494	GBENU_STATS_P7(tx_128_to_255B_frames),
1495	GBENU_STATS_P7(tx_256_to_511B_frames),
1496	GBENU_STATS_P7(tx_512_to_1023B_frames),
1497	GBENU_STATS_P7(tx_1024B_frames),
1498	GBENU_STATS_P7(net_bytes),
1499	GBENU_STATS_P7(rx_bottom_fifo_drop),
1500	GBENU_STATS_P7(rx_port_mask_drop),
1501	GBENU_STATS_P7(rx_top_fifo_drop),
1502	GBENU_STATS_P7(ale_rate_limit_drop),
1503	GBENU_STATS_P7(ale_vid_ingress_drop),
1504	GBENU_STATS_P7(ale_da_eq_sa_drop),
1505	GBENU_STATS_P7(ale_unknown_ucast),
1506	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1507	GBENU_STATS_P7(ale_unknown_mcast),
1508	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1509	GBENU_STATS_P7(ale_unknown_bcast),
1510	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1511	GBENU_STATS_P7(ale_pol_match),
1512	GBENU_STATS_P7(ale_pol_match_red),
1513	GBENU_STATS_P7(ale_pol_match_yellow),
1514	GBENU_STATS_P7(tx_mem_protect_err),
1515	GBENU_STATS_P7(tx_pri0_drop),
1516	GBENU_STATS_P7(tx_pri1_drop),
1517	GBENU_STATS_P7(tx_pri2_drop),
1518	GBENU_STATS_P7(tx_pri3_drop),
1519	GBENU_STATS_P7(tx_pri4_drop),
1520	GBENU_STATS_P7(tx_pri5_drop),
1521	GBENU_STATS_P7(tx_pri6_drop),
1522	GBENU_STATS_P7(tx_pri7_drop),
1523	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1524	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1525	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1526	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1527	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1528	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1529	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1530	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1531	/* GBENU Module 8 */
1532	GBENU_STATS_P8(rx_good_frames),
1533	GBENU_STATS_P8(rx_broadcast_frames),
1534	GBENU_STATS_P8(rx_multicast_frames),
1535	GBENU_STATS_P8(rx_pause_frames),
1536	GBENU_STATS_P8(rx_crc_errors),
1537	GBENU_STATS_P8(rx_align_code_errors),
1538	GBENU_STATS_P8(rx_oversized_frames),
1539	GBENU_STATS_P8(rx_jabber_frames),
1540	GBENU_STATS_P8(rx_undersized_frames),
1541	GBENU_STATS_P8(rx_fragments),
1542	GBENU_STATS_P8(ale_drop),
1543	GBENU_STATS_P8(ale_overrun_drop),
1544	GBENU_STATS_P8(rx_bytes),
1545	GBENU_STATS_P8(tx_good_frames),
1546	GBENU_STATS_P8(tx_broadcast_frames),
1547	GBENU_STATS_P8(tx_multicast_frames),
1548	GBENU_STATS_P8(tx_pause_frames),
1549	GBENU_STATS_P8(tx_deferred_frames),
1550	GBENU_STATS_P8(tx_collision_frames),
1551	GBENU_STATS_P8(tx_single_coll_frames),
1552	GBENU_STATS_P8(tx_mult_coll_frames),
1553	GBENU_STATS_P8(tx_excessive_collisions),
1554	GBENU_STATS_P8(tx_late_collisions),
1555	GBENU_STATS_P8(rx_ipg_error),
1556	GBENU_STATS_P8(tx_carrier_sense_errors),
1557	GBENU_STATS_P8(tx_bytes),
1558	GBENU_STATS_P8(tx_64B_frames),
1559	GBENU_STATS_P8(tx_65_to_127B_frames),
1560	GBENU_STATS_P8(tx_128_to_255B_frames),
1561	GBENU_STATS_P8(tx_256_to_511B_frames),
1562	GBENU_STATS_P8(tx_512_to_1023B_frames),
1563	GBENU_STATS_P8(tx_1024B_frames),
1564	GBENU_STATS_P8(net_bytes),
1565	GBENU_STATS_P8(rx_bottom_fifo_drop),
1566	GBENU_STATS_P8(rx_port_mask_drop),
1567	GBENU_STATS_P8(rx_top_fifo_drop),
1568	GBENU_STATS_P8(ale_rate_limit_drop),
1569	GBENU_STATS_P8(ale_vid_ingress_drop),
1570	GBENU_STATS_P8(ale_da_eq_sa_drop),
1571	GBENU_STATS_P8(ale_unknown_ucast),
1572	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1573	GBENU_STATS_P8(ale_unknown_mcast),
1574	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1575	GBENU_STATS_P8(ale_unknown_bcast),
1576	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1577	GBENU_STATS_P8(ale_pol_match),
1578	GBENU_STATS_P8(ale_pol_match_red),
1579	GBENU_STATS_P8(ale_pol_match_yellow),
1580	GBENU_STATS_P8(tx_mem_protect_err),
1581	GBENU_STATS_P8(tx_pri0_drop),
1582	GBENU_STATS_P8(tx_pri1_drop),
1583	GBENU_STATS_P8(tx_pri2_drop),
1584	GBENU_STATS_P8(tx_pri3_drop),
1585	GBENU_STATS_P8(tx_pri4_drop),
1586	GBENU_STATS_P8(tx_pri5_drop),
1587	GBENU_STATS_P8(tx_pri6_drop),
1588	GBENU_STATS_P8(tx_pri7_drop),
1589	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1590	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1591	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1592	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1593	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1594	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1595	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1596	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1597};
1598
1599#define XGBE_STATS0_INFO(field)				\
1600{							\
1601	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1602	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1603	offsetof(struct xgbe_hw_stats, field)		\
1604}
1605
1606#define XGBE_STATS1_INFO(field)				\
1607{							\
1608	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1609	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1610	offsetof(struct xgbe_hw_stats, field)		\
1611}
1612
1613#define XGBE_STATS2_INFO(field)				\
1614{							\
1615	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1616	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1617	offsetof(struct xgbe_hw_stats, field)		\
1618}
1619
1620static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1621	/* GBE module 0 */
1622	XGBE_STATS0_INFO(rx_good_frames),
1623	XGBE_STATS0_INFO(rx_broadcast_frames),
1624	XGBE_STATS0_INFO(rx_multicast_frames),
1625	XGBE_STATS0_INFO(rx_oversized_frames),
1626	XGBE_STATS0_INFO(rx_undersized_frames),
1627	XGBE_STATS0_INFO(overrun_type4),
1628	XGBE_STATS0_INFO(overrun_type5),
1629	XGBE_STATS0_INFO(rx_bytes),
1630	XGBE_STATS0_INFO(tx_good_frames),
1631	XGBE_STATS0_INFO(tx_broadcast_frames),
1632	XGBE_STATS0_INFO(tx_multicast_frames),
1633	XGBE_STATS0_INFO(tx_bytes),
1634	XGBE_STATS0_INFO(tx_64byte_frames),
1635	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1636	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1637	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1638	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1639	XGBE_STATS0_INFO(tx_1024byte_frames),
1640	XGBE_STATS0_INFO(net_bytes),
1641	XGBE_STATS0_INFO(rx_sof_overruns),
1642	XGBE_STATS0_INFO(rx_mof_overruns),
1643	XGBE_STATS0_INFO(rx_dma_overruns),
1644	/* XGBE module 1 */
1645	XGBE_STATS1_INFO(rx_good_frames),
1646	XGBE_STATS1_INFO(rx_broadcast_frames),
1647	XGBE_STATS1_INFO(rx_multicast_frames),
1648	XGBE_STATS1_INFO(rx_pause_frames),
1649	XGBE_STATS1_INFO(rx_crc_errors),
1650	XGBE_STATS1_INFO(rx_align_code_errors),
1651	XGBE_STATS1_INFO(rx_oversized_frames),
1652	XGBE_STATS1_INFO(rx_jabber_frames),
1653	XGBE_STATS1_INFO(rx_undersized_frames),
1654	XGBE_STATS1_INFO(rx_fragments),
1655	XGBE_STATS1_INFO(overrun_type4),
1656	XGBE_STATS1_INFO(overrun_type5),
1657	XGBE_STATS1_INFO(rx_bytes),
1658	XGBE_STATS1_INFO(tx_good_frames),
1659	XGBE_STATS1_INFO(tx_broadcast_frames),
1660	XGBE_STATS1_INFO(tx_multicast_frames),
1661	XGBE_STATS1_INFO(tx_pause_frames),
1662	XGBE_STATS1_INFO(tx_deferred_frames),
1663	XGBE_STATS1_INFO(tx_collision_frames),
1664	XGBE_STATS1_INFO(tx_single_coll_frames),
1665	XGBE_STATS1_INFO(tx_mult_coll_frames),
1666	XGBE_STATS1_INFO(tx_excessive_collisions),
1667	XGBE_STATS1_INFO(tx_late_collisions),
1668	XGBE_STATS1_INFO(tx_underrun),
1669	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1670	XGBE_STATS1_INFO(tx_bytes),
1671	XGBE_STATS1_INFO(tx_64byte_frames),
1672	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1673	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1674	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1675	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1676	XGBE_STATS1_INFO(tx_1024byte_frames),
1677	XGBE_STATS1_INFO(net_bytes),
1678	XGBE_STATS1_INFO(rx_sof_overruns),
1679	XGBE_STATS1_INFO(rx_mof_overruns),
1680	XGBE_STATS1_INFO(rx_dma_overruns),
1681	/* XGBE module 2 */
1682	XGBE_STATS2_INFO(rx_good_frames),
1683	XGBE_STATS2_INFO(rx_broadcast_frames),
1684	XGBE_STATS2_INFO(rx_multicast_frames),
1685	XGBE_STATS2_INFO(rx_pause_frames),
1686	XGBE_STATS2_INFO(rx_crc_errors),
1687	XGBE_STATS2_INFO(rx_align_code_errors),
1688	XGBE_STATS2_INFO(rx_oversized_frames),
1689	XGBE_STATS2_INFO(rx_jabber_frames),
1690	XGBE_STATS2_INFO(rx_undersized_frames),
1691	XGBE_STATS2_INFO(rx_fragments),
1692	XGBE_STATS2_INFO(overrun_type4),
1693	XGBE_STATS2_INFO(overrun_type5),
1694	XGBE_STATS2_INFO(rx_bytes),
1695	XGBE_STATS2_INFO(tx_good_frames),
1696	XGBE_STATS2_INFO(tx_broadcast_frames),
1697	XGBE_STATS2_INFO(tx_multicast_frames),
1698	XGBE_STATS2_INFO(tx_pause_frames),
1699	XGBE_STATS2_INFO(tx_deferred_frames),
1700	XGBE_STATS2_INFO(tx_collision_frames),
1701	XGBE_STATS2_INFO(tx_single_coll_frames),
1702	XGBE_STATS2_INFO(tx_mult_coll_frames),
1703	XGBE_STATS2_INFO(tx_excessive_collisions),
1704	XGBE_STATS2_INFO(tx_late_collisions),
1705	XGBE_STATS2_INFO(tx_underrun),
1706	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1707	XGBE_STATS2_INFO(tx_bytes),
1708	XGBE_STATS2_INFO(tx_64byte_frames),
1709	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1710	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1711	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1712	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1713	XGBE_STATS2_INFO(tx_1024byte_frames),
1714	XGBE_STATS2_INFO(net_bytes),
1715	XGBE_STATS2_INFO(rx_sof_overruns),
1716	XGBE_STATS2_INFO(rx_mof_overruns),
1717	XGBE_STATS2_INFO(rx_dma_overruns),
1718};
1719
1720#define for_each_intf(i, priv) \
1721	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1722
1723#define for_each_sec_slave(slave, priv) \
1724	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1725
1726#define first_sec_slave(priv)					\
1727	list_first_entry(&priv->secondary_slaves, \
1728			struct gbe_slave, slave_list)
1729
1730static void keystone_get_drvinfo(struct net_device *ndev,
1731				 struct ethtool_drvinfo *info)
1732{
1733	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1734	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1735}
1736
1737static u32 keystone_get_msglevel(struct net_device *ndev)
1738{
1739	struct netcp_intf *netcp = netdev_priv(ndev);
1740
1741	return netcp->msg_enable;
1742}
1743
1744static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1745{
1746	struct netcp_intf *netcp = netdev_priv(ndev);
1747
1748	netcp->msg_enable = value;
1749}
1750
1751static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1752{
1753	struct gbe_intf *gbe_intf;
1754
1755	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1756	if (!gbe_intf)
1757		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1758
1759	return gbe_intf;
1760}
1761
1762static void keystone_get_stat_strings(struct net_device *ndev,
1763				      uint32_t stringset, uint8_t *data)
1764{
1765	struct netcp_intf *netcp = netdev_priv(ndev);
1766	struct gbe_intf *gbe_intf;
1767	struct gbe_priv *gbe_dev;
1768	int i;
1769
1770	gbe_intf = keystone_get_intf_data(netcp);
1771	if (!gbe_intf)
1772		return;
1773	gbe_dev = gbe_intf->gbe_dev;
1774
1775	switch (stringset) {
1776	case ETH_SS_STATS:
1777		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1778			memcpy(data, gbe_dev->et_stats[i].desc,
1779			       ETH_GSTRING_LEN);
1780			data += ETH_GSTRING_LEN;
1781		}
1782		break;
1783	case ETH_SS_TEST:
1784		break;
1785	}
1786}
1787
1788static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1789{
1790	struct netcp_intf *netcp = netdev_priv(ndev);
1791	struct gbe_intf *gbe_intf;
1792	struct gbe_priv *gbe_dev;
1793
1794	gbe_intf = keystone_get_intf_data(netcp);
1795	if (!gbe_intf)
1796		return -EINVAL;
1797	gbe_dev = gbe_intf->gbe_dev;
1798
1799	switch (stringset) {
1800	case ETH_SS_TEST:
1801		return 0;
1802	case ETH_SS_STATS:
1803		return gbe_dev->num_et_stats;
1804	default:
1805		return -EINVAL;
1806	}
1807}
1808
1809static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1810{
1811	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1812	u32  __iomem *p_stats_entry;
1813	int i;
1814
1815	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1816		if (gbe_dev->et_stats[i].type == stats_mod) {
1817			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1818			gbe_dev->hw_stats[i] = 0;
1819			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1820		}
1821	}
1822}
1823
1824static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1825					     int et_stats_entry)
1826{
1827	void __iomem *base = NULL;
1828	u32  __iomem *p_stats_entry;
1829	u32 curr, delta;
1830
1831	/* The hw_stats_regs pointers are already
1832	 * properly set to point to the right base:
1833	 */
1834	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1835	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1836	curr = readl(p_stats_entry);
1837	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1838	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1839	gbe_dev->hw_stats[et_stats_entry] += delta;
1840}
1841
1842static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1843{
1844	int i;
1845
1846	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1847		gbe_update_hw_stats_entry(gbe_dev, i);
1848
1849		if (data)
1850			data[i] = gbe_dev->hw_stats[i];
1851	}
1852}
1853
1854static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1855					       int stats_mod)
1856{
1857	u32 val;
1858
1859	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1860
1861	switch (stats_mod) {
1862	case GBE_STATSA_MODULE:
1863	case GBE_STATSB_MODULE:
1864		val &= ~GBE_STATS_CD_SEL;
1865		break;
1866	case GBE_STATSC_MODULE:
1867	case GBE_STATSD_MODULE:
1868		val |= GBE_STATS_CD_SEL;
1869		break;
1870	default:
1871		return;
1872	}
1873
1874	/* make the stat module visible */
1875	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1876}
1877
1878static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1879{
1880	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1881	gbe_reset_mod_stats(gbe_dev, stats_mod);
1882}
1883
1884static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1885{
1886	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1887	int et_entry, j, pair;
1888
1889	for (pair = 0; pair < 2; pair++) {
1890		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1891						      GBE_STATSC_MODULE :
1892						      GBE_STATSA_MODULE));
1893
1894		for (j = 0; j < half_num_et_stats; j++) {
1895			et_entry = pair * half_num_et_stats + j;
1896			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1897
1898			if (data)
1899				data[et_entry] = gbe_dev->hw_stats[et_entry];
1900		}
1901	}
1902}
1903
1904static void keystone_get_ethtool_stats(struct net_device *ndev,
1905				       struct ethtool_stats *stats,
1906				       uint64_t *data)
1907{
1908	struct netcp_intf *netcp = netdev_priv(ndev);
1909	struct gbe_intf *gbe_intf;
1910	struct gbe_priv *gbe_dev;
1911
1912	gbe_intf = keystone_get_intf_data(netcp);
1913	if (!gbe_intf)
1914		return;
1915
1916	gbe_dev = gbe_intf->gbe_dev;
1917	spin_lock_bh(&gbe_dev->hw_stats_lock);
1918	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1919		gbe_update_stats_ver14(gbe_dev, data);
1920	else
1921		gbe_update_stats(gbe_dev, data);
1922	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1923}
1924
1925static int keystone_get_link_ksettings(struct net_device *ndev,
1926				       struct ethtool_link_ksettings *cmd)
1927{
1928	struct netcp_intf *netcp = netdev_priv(ndev);
1929	struct phy_device *phy = ndev->phydev;
1930	struct gbe_intf *gbe_intf;
 
1931
1932	if (!phy)
1933		return -EINVAL;
1934
1935	gbe_intf = keystone_get_intf_data(netcp);
1936	if (!gbe_intf)
1937		return -EINVAL;
1938
1939	if (!gbe_intf->slave)
1940		return -EINVAL;
1941
1942	phy_ethtool_ksettings_get(phy, cmd);
1943	cmd->base.port = gbe_intf->slave->phy_port_t;
 
1944
1945	return 0;
1946}
1947
1948static int keystone_set_link_ksettings(struct net_device *ndev,
1949				       const struct ethtool_link_ksettings *cmd)
1950{
1951	struct netcp_intf *netcp = netdev_priv(ndev);
1952	struct phy_device *phy = ndev->phydev;
1953	struct gbe_intf *gbe_intf;
1954	u8 port = cmd->base.port;
1955	u32 advertising, supported;
1956	u32 features;
1957
1958	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1959						cmd->link_modes.advertising);
1960	ethtool_convert_link_mode_to_legacy_u32(&supported,
1961						cmd->link_modes.supported);
1962	features = advertising & supported;
1963
1964	if (!phy)
1965		return -EINVAL;
1966
1967	gbe_intf = keystone_get_intf_data(netcp);
1968	if (!gbe_intf)
1969		return -EINVAL;
1970
1971	if (!gbe_intf->slave)
1972		return -EINVAL;
1973
1974	if (port != gbe_intf->slave->phy_port_t) {
1975		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1976			return -EINVAL;
1977
1978		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1979			return -EINVAL;
1980
1981		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1982			return -EINVAL;
1983
1984		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1985			return -EINVAL;
1986
1987		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1988			return -EINVAL;
1989	}
1990
1991	gbe_intf->slave->phy_port_t = port;
1992	return phy_ethtool_ksettings_set(phy, cmd);
1993}
1994
1995#if IS_ENABLED(CONFIG_TI_CPTS)
1996static int keystone_get_ts_info(struct net_device *ndev,
1997				struct ethtool_ts_info *info)
1998{
1999	struct netcp_intf *netcp = netdev_priv(ndev);
2000	struct gbe_intf *gbe_intf;
2001
2002	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2003	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2004		return -EINVAL;
2005
2006	info->so_timestamping =
2007		SOF_TIMESTAMPING_TX_HARDWARE |
2008		SOF_TIMESTAMPING_TX_SOFTWARE |
2009		SOF_TIMESTAMPING_RX_HARDWARE |
2010		SOF_TIMESTAMPING_RX_SOFTWARE |
2011		SOF_TIMESTAMPING_SOFTWARE |
2012		SOF_TIMESTAMPING_RAW_HARDWARE;
2013	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2014	info->tx_types =
2015		(1 << HWTSTAMP_TX_OFF) |
2016		(1 << HWTSTAMP_TX_ON);
2017	info->rx_filters =
2018		(1 << HWTSTAMP_FILTER_NONE) |
2019		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2020		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2021	return 0;
2022}
2023#else
2024static int keystone_get_ts_info(struct net_device *ndev,
2025				struct ethtool_ts_info *info)
2026{
2027	info->so_timestamping =
2028		SOF_TIMESTAMPING_TX_SOFTWARE |
2029		SOF_TIMESTAMPING_RX_SOFTWARE |
2030		SOF_TIMESTAMPING_SOFTWARE;
2031	info->phc_index = -1;
2032	info->tx_types = 0;
2033	info->rx_filters = 0;
2034	return 0;
2035}
2036#endif /* CONFIG_TI_CPTS */
2037
2038static const struct ethtool_ops keystone_ethtool_ops = {
2039	.get_drvinfo		= keystone_get_drvinfo,
2040	.get_link		= ethtool_op_get_link,
2041	.get_msglevel		= keystone_get_msglevel,
2042	.set_msglevel		= keystone_set_msglevel,
2043	.get_strings		= keystone_get_stat_strings,
2044	.get_sset_count		= keystone_get_sset_count,
2045	.get_ethtool_stats	= keystone_get_ethtool_stats,
2046	.get_link_ksettings	= keystone_get_link_ksettings,
2047	.set_link_ksettings	= keystone_set_link_ksettings,
2048	.get_ts_info		= keystone_get_ts_info,
2049};
2050
 
 
 
 
2051static void gbe_set_slave_mac(struct gbe_slave *slave,
2052			      struct gbe_intf *gbe_intf)
2053{
2054	struct net_device *ndev = gbe_intf->ndev;
2055
2056	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2057	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2058}
2059
2060static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2061{
2062	if (priv->host_port == 0)
2063		return slave_num + 1;
2064
2065	return slave_num;
2066}
2067
2068static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2069					  struct net_device *ndev,
2070					  struct gbe_slave *slave,
2071					  int up)
2072{
2073	struct phy_device *phy = slave->phy;
2074	u32 mac_control = 0;
2075
2076	if (up) {
2077		mac_control = slave->mac_control;
2078		if (phy && (phy->speed == SPEED_1000)) {
2079			mac_control |= MACSL_GIG_MODE;
2080			mac_control &= ~MACSL_XGIG_MODE;
2081		} else if (phy && (phy->speed == SPEED_10000)) {
2082			mac_control |= MACSL_XGIG_MODE;
2083			mac_control &= ~MACSL_GIG_MODE;
2084		}
2085
2086		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2087						 mac_control));
2088
2089		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2090				     ALE_PORT_STATE,
2091				     ALE_PORT_STATE_FORWARD);
2092
2093		if (ndev && slave->open &&
2094		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2095		    slave->link_interface != XGMII_LINK_MAC_PHY)
2096			netif_carrier_on(ndev);
2097	} else {
2098		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2099						 mac_control));
2100		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2101				     ALE_PORT_STATE,
2102				     ALE_PORT_STATE_DISABLE);
2103		if (ndev &&
2104		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2105		    slave->link_interface != XGMII_LINK_MAC_PHY)
2106			netif_carrier_off(ndev);
2107	}
2108
2109	if (phy)
2110		phy_print_status(phy);
2111}
2112
2113static bool gbe_phy_link_status(struct gbe_slave *slave)
2114{
2115	 return !slave->phy || slave->phy->link;
2116}
2117
2118static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2119					  struct gbe_slave *slave,
2120					  struct net_device *ndev)
2121{
2122	int sp = slave->slave_num;
2123	int phy_link_state, sgmii_link_state = 1, link_state;
2124
2125	if (!slave->open)
2126		return;
2127
2128	if (!SLAVE_LINK_IS_XGMII(slave)) {
2129		sgmii_link_state =
2130			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2131	}
2132
2133	phy_link_state = gbe_phy_link_status(slave);
2134	link_state = phy_link_state & sgmii_link_state;
2135
2136	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2137		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2138					      link_state);
2139}
2140
2141static void xgbe_adjust_link(struct net_device *ndev)
2142{
2143	struct netcp_intf *netcp = netdev_priv(ndev);
2144	struct gbe_intf *gbe_intf;
2145
2146	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2147	if (!gbe_intf)
2148		return;
2149
2150	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2151				      ndev);
2152}
2153
2154static void gbe_adjust_link(struct net_device *ndev)
2155{
2156	struct netcp_intf *netcp = netdev_priv(ndev);
2157	struct gbe_intf *gbe_intf;
2158
2159	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2160	if (!gbe_intf)
2161		return;
2162
2163	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2164				      ndev);
2165}
2166
2167static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2168{
2169	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2170	struct gbe_slave *slave;
2171
2172	for_each_sec_slave(slave, gbe_dev)
2173		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2174}
2175
2176/* Reset EMAC
2177 * Soft reset is set and polled until clear, or until a timeout occurs
2178 */
2179static int gbe_port_reset(struct gbe_slave *slave)
2180{
2181	u32 i, v;
2182
2183	/* Set the soft reset bit */
2184	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2185
2186	/* Wait for the bit to clear */
2187	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2188		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2189		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2190			return 0;
2191	}
2192
2193	/* Timeout on the reset */
2194	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2195}
2196
2197/* Configure EMAC */
2198static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2199			    int max_rx_len)
2200{
2201	void __iomem *rx_maxlen_reg;
2202	u32 xgmii_mode;
2203
2204	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2205		max_rx_len = NETCP_MAX_FRAME_SIZE;
2206
2207	/* Enable correct MII mode at SS level */
2208	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
2209	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2210		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2211		xgmii_mode |= (1 << slave->slave_num);
2212		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2213	}
2214
2215	if (IS_SS_ID_MU(gbe_dev))
2216		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2217	else
2218		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2219
2220	writel(max_rx_len, rx_maxlen_reg);
2221	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2222}
2223
2224static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2225			      struct gbe_slave *slave, bool set)
2226{
2227	if (SLAVE_LINK_IS_XGMII(slave))
2228		return;
2229
2230	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2231			    slave->slave_num, set);
2232}
2233
2234static void gbe_slave_stop(struct gbe_intf *intf)
2235{
2236	struct gbe_priv *gbe_dev = intf->gbe_dev;
2237	struct gbe_slave *slave = intf->slave;
2238
2239	gbe_sgmii_rtreset(gbe_dev, slave, true);
2240	gbe_port_reset(slave);
2241	/* Disable forwarding */
2242	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2243			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2244	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2245			   1 << slave->port_num, 0, 0);
2246
2247	if (!slave->phy)
2248		return;
2249
2250	phy_stop(slave->phy);
2251	phy_disconnect(slave->phy);
2252	slave->phy = NULL;
2253}
2254
2255static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2256{
2257	if (SLAVE_LINK_IS_XGMII(slave))
2258		return;
2259
2260	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2261	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2262			   slave->link_interface);
2263}
2264
2265static int gbe_slave_open(struct gbe_intf *gbe_intf)
2266{
2267	struct gbe_priv *priv = gbe_intf->gbe_dev;
2268	struct gbe_slave *slave = gbe_intf->slave;
2269	phy_interface_t phy_mode;
2270	bool has_phy = false;
2271
2272	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2273
2274	gbe_sgmii_config(priv, slave);
2275	gbe_port_reset(slave);
2276	gbe_sgmii_rtreset(priv, slave, false);
2277	gbe_port_config(priv, slave, priv->rx_packet_max);
2278	gbe_set_slave_mac(slave, gbe_intf);
2279	/* enable forwarding */
2280	cpsw_ale_control_set(priv->ale, slave->port_num,
2281			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2282	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2283			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2284
2285	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2286		has_phy = true;
2287		phy_mode = PHY_INTERFACE_MODE_SGMII;
2288		slave->phy_port_t = PORT_MII;
2289	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2290		has_phy = true;
2291		phy_mode = PHY_INTERFACE_MODE_NA;
2292		slave->phy_port_t = PORT_FIBRE;
2293	}
2294
2295	if (has_phy) {
2296		if (priv->ss_version == XGBE_SS_VERSION_10)
2297			hndlr = xgbe_adjust_link;
2298
2299		slave->phy = of_phy_connect(gbe_intf->ndev,
2300					    slave->phy_node,
2301					    hndlr, 0,
2302					    phy_mode);
2303		if (!slave->phy) {
2304			dev_err(priv->dev, "phy not found on slave %d\n",
2305				slave->slave_num);
2306			return -ENODEV;
2307		}
2308		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2309			phydev_name(slave->phy));
2310		phy_start(slave->phy);
 
2311	}
2312	return 0;
2313}
2314
2315static void gbe_init_host_port(struct gbe_priv *priv)
2316{
2317	int bypass_en = 1;
2318
2319	/* Host Tx Pri */
2320	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2321		writel(HOST_TX_PRI_MAP_DEFAULT,
2322		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2323
2324	/* Max length register */
2325	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2326						  rx_maxlen));
2327
2328	cpsw_ale_start(priv->ale);
2329
2330	if (priv->enable_ale)
2331		bypass_en = 0;
2332
2333	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2334
2335	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2336
2337	cpsw_ale_control_set(priv->ale, priv->host_port,
2338			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2339
2340	cpsw_ale_control_set(priv->ale, 0,
2341			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2342			     GBE_PORT_MASK(priv->ale_ports));
2343
2344	cpsw_ale_control_set(priv->ale, 0,
2345			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2346			     GBE_PORT_MASK(priv->ale_ports - 1));
2347
2348	cpsw_ale_control_set(priv->ale, 0,
2349			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2350			     GBE_PORT_MASK(priv->ale_ports));
2351
2352	cpsw_ale_control_set(priv->ale, 0,
2353			     ALE_PORT_UNTAGGED_EGRESS,
2354			     GBE_PORT_MASK(priv->ale_ports));
2355}
2356
2357static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2358{
2359	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2360	u16 vlan_id;
2361
2362	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2363			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2364			   ALE_MCAST_FWD_2);
2365	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2366		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2367				   GBE_PORT_MASK(gbe_dev->ale_ports),
2368				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2369	}
2370}
2371
2372static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2373{
2374	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2375	u16 vlan_id;
2376
2377	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2378
2379	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2380		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2381				   ALE_VLAN, vlan_id);
2382}
2383
2384static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2385{
2386	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2387	u16 vlan_id;
2388
2389	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2390
2391	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2392		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2393	}
2394}
2395
2396static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2397{
2398	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2399	u16 vlan_id;
2400
2401	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2402
2403	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2404		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2405				   ALE_VLAN, vlan_id);
2406	}
2407}
2408
2409static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2410{
2411	struct gbe_intf *gbe_intf = intf_priv;
2412	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2413
2414	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2415		naddr->addr, naddr->type);
2416
2417	switch (naddr->type) {
2418	case ADDR_MCAST:
2419	case ADDR_BCAST:
2420		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2421		break;
2422	case ADDR_UCAST:
2423	case ADDR_DEV:
2424		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2425		break;
2426	case ADDR_ANY:
2427		/* nothing to do for promiscuous */
2428	default:
2429		break;
2430	}
2431
2432	return 0;
2433}
2434
2435static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2436{
2437	struct gbe_intf *gbe_intf = intf_priv;
2438	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2439
2440	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2441		naddr->addr, naddr->type);
2442
2443	switch (naddr->type) {
2444	case ADDR_MCAST:
2445	case ADDR_BCAST:
2446		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2447		break;
2448	case ADDR_UCAST:
2449	case ADDR_DEV:
2450		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2451		break;
2452	case ADDR_ANY:
2453		/* nothing to do for promiscuous */
2454	default:
2455		break;
2456	}
2457
2458	return 0;
2459}
2460
2461static int gbe_add_vid(void *intf_priv, int vid)
2462{
2463	struct gbe_intf *gbe_intf = intf_priv;
2464	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2465
2466	set_bit(vid, gbe_intf->active_vlans);
2467
2468	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2469			  GBE_PORT_MASK(gbe_dev->ale_ports),
2470			  GBE_MASK_NO_PORTS,
2471			  GBE_PORT_MASK(gbe_dev->ale_ports),
2472			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2473
2474	return 0;
2475}
2476
2477static int gbe_del_vid(void *intf_priv, int vid)
2478{
2479	struct gbe_intf *gbe_intf = intf_priv;
2480	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2481
2482	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2483	clear_bit(vid, gbe_intf->active_vlans);
2484	return 0;
2485}
2486
2487#if IS_ENABLED(CONFIG_TI_CPTS)
2488#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2489#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2490
2491static void gbe_txtstamp(void *context, struct sk_buff *skb)
2492{
2493	struct gbe_intf *gbe_intf = context;
2494	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2495
2496	cpts_tx_timestamp(gbe_dev->cpts, skb);
2497}
2498
2499static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2500			      const struct netcp_packet *p_info)
2501{
2502	struct sk_buff *skb = p_info->skb;
2503
2504	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2505}
2506
2507static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2508				 struct netcp_packet *p_info)
2509{
2510	struct phy_device *phydev = p_info->skb->dev->phydev;
2511	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2512
2513	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2514	    !cpts_is_tx_enabled(gbe_dev->cpts))
2515		return 0;
2516
2517	/* If phy has the txtstamp api, assume it will do it.
2518	 * We mark it here because skb_tx_timestamp() is called
2519	 * after all the txhooks are called.
2520	 */
2521	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2522		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2523		return 0;
2524	}
2525
2526	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2527		p_info->txtstamp = gbe_txtstamp;
2528		p_info->ts_context = (void *)gbe_intf;
2529		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2530	}
2531
2532	return 0;
2533}
2534
2535static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2536{
2537	struct phy_device *phydev = p_info->skb->dev->phydev;
2538	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2539
2540	if (p_info->rxtstamp_complete)
2541		return 0;
2542
2543	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2544		p_info->rxtstamp_complete = true;
2545		return 0;
2546	}
2547
2548	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2549	p_info->rxtstamp_complete = true;
2550
2551	return 0;
2552}
2553
2554static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2555{
2556	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2557	struct cpts *cpts = gbe_dev->cpts;
2558	struct hwtstamp_config cfg;
2559
2560	if (!cpts)
2561		return -EOPNOTSUPP;
2562
2563	cfg.flags = 0;
2564	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2565		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2566	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
2567			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
2568
2569	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2570}
2571
2572static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2573{
2574	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2575	struct gbe_slave *slave = gbe_intf->slave;
2576	u32 ts_en, seq_id, ctl;
2577
2578	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
2579	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
2580		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2581		return;
2582	}
2583
2584	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2585	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2586	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2587		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2588		(slave->ts_ctl.uni ?  TS_UNI_EN :
2589			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2590
2591	if (cpts_is_tx_enabled(gbe_dev->cpts))
2592		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2593
2594	if (cpts_is_rx_enabled(gbe_dev->cpts))
2595		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2596
2597	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2598	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2599	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2600}
2601
2602static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2603{
2604	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2605	struct cpts *cpts = gbe_dev->cpts;
2606	struct hwtstamp_config cfg;
2607
2608	if (!cpts)
2609		return -EOPNOTSUPP;
2610
2611	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2612		return -EFAULT;
2613
2614	/* reserved for future extensions */
2615	if (cfg.flags)
2616		return -EINVAL;
2617
2618	switch (cfg.tx_type) {
2619	case HWTSTAMP_TX_OFF:
2620		cpts_tx_enable(cpts, 0);
2621		break;
2622	case HWTSTAMP_TX_ON:
2623		cpts_tx_enable(cpts, 1);
2624		break;
2625	default:
2626		return -ERANGE;
2627	}
2628
2629	switch (cfg.rx_filter) {
2630	case HWTSTAMP_FILTER_NONE:
2631		cpts_rx_enable(cpts, 0);
2632		break;
2633	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2634	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2635	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2636		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2637		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2638		break;
2639	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2640	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2641	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2642	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2643	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2644	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2645	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2646	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2647	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2648		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
2649		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2650		break;
2651	default:
2652		return -ERANGE;
2653	}
2654
2655	gbe_hwtstamp(gbe_intf);
2656
2657	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2658}
2659
2660static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2661{
2662	if (!gbe_dev->cpts)
2663		return;
2664
2665	if (gbe_dev->cpts_registered > 0)
2666		goto done;
2667
2668	if (cpts_register(gbe_dev->cpts)) {
2669		dev_err(gbe_dev->dev, "error registering cpts device\n");
2670		return;
2671	}
2672
2673done:
2674	++gbe_dev->cpts_registered;
2675}
2676
2677static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2678{
2679	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2680		return;
2681
2682	if (--gbe_dev->cpts_registered)
2683		return;
2684
2685	cpts_unregister(gbe_dev->cpts);
2686}
2687#else
2688static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2689					struct netcp_packet *p_info)
2690{
2691	return 0;
2692}
2693
2694static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2695			       struct netcp_packet *p_info)
2696{
2697	return 0;
2698}
2699
2700static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2701			       struct ifreq *ifr, int cmd)
2702{
2703	return -EOPNOTSUPP;
2704}
2705
2706static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2707{
2708}
2709
2710static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2711{
2712}
2713
2714static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2715{
2716	return -EOPNOTSUPP;
2717}
2718
2719static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2720{
2721	return -EOPNOTSUPP;
2722}
2723#endif /* CONFIG_TI_CPTS */
2724
2725static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2726{
2727	struct gbe_intf *gbe_intf = intf_priv;
2728	struct phy_device *phy = gbe_intf->slave->phy;
2729
2730	if (!phy || !phy->drv->hwtstamp) {
2731		switch (cmd) {
2732		case SIOCGHWTSTAMP:
2733			return gbe_hwtstamp_get(gbe_intf, req);
2734		case SIOCSHWTSTAMP:
2735			return gbe_hwtstamp_set(gbe_intf, req);
2736		}
2737	}
2738
2739	if (phy)
2740		return phy_mii_ioctl(phy, req, cmd);
2741
2742	return -EOPNOTSUPP;
2743}
2744
2745static void netcp_ethss_timer(struct timer_list *t)
2746{
2747	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2748	struct gbe_intf *gbe_intf;
2749	struct gbe_slave *slave;
2750
2751	/* Check & update SGMII link state of interfaces */
2752	for_each_intf(gbe_intf, gbe_dev) {
2753		if (!gbe_intf->slave->open)
2754			continue;
2755		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2756					      gbe_intf->ndev);
2757	}
2758
2759	/* Check & update SGMII link state of secondary ports */
2760	for_each_sec_slave(slave, gbe_dev) {
2761		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2762	}
2763
2764	/* A timer runs as a BH, no need to block them */
2765	spin_lock(&gbe_dev->hw_stats_lock);
2766
2767	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2768		gbe_update_stats_ver14(gbe_dev, NULL);
2769	else
2770		gbe_update_stats(gbe_dev, NULL);
2771
2772	spin_unlock(&gbe_dev->hw_stats_lock);
2773
2774	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2775	add_timer(&gbe_dev->timer);
2776}
2777
2778static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2779{
2780	struct gbe_intf *gbe_intf = data;
2781
2782	p_info->tx_pipe = &gbe_intf->tx_pipe;
2783
2784	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2785}
2786
2787static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2788{
2789	struct gbe_intf *gbe_intf = data;
2790
2791	return gbe_rxtstamp(gbe_intf, p_info);
2792}
2793
2794static int gbe_open(void *intf_priv, struct net_device *ndev)
2795{
2796	struct gbe_intf *gbe_intf = intf_priv;
2797	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2798	struct netcp_intf *netcp = netdev_priv(ndev);
2799	struct gbe_slave *slave = gbe_intf->slave;
2800	int port_num = slave->port_num;
2801	u32 reg, val;
2802	int ret;
2803
2804	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2805	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2806		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2807		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2808
2809	/* For 10G and on NetCP 1.5, use directed to port */
2810	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2811		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2812
2813	if (gbe_dev->enable_ale)
2814		gbe_intf->tx_pipe.switch_to_port = 0;
2815	else
2816		gbe_intf->tx_pipe.switch_to_port = port_num;
2817
2818	dev_dbg(gbe_dev->dev,
2819		"opened TX channel %s: %p with to port %d, flags %d\n",
2820		gbe_intf->tx_pipe.dma_chan_name,
2821		gbe_intf->tx_pipe.dma_channel,
2822		gbe_intf->tx_pipe.switch_to_port,
2823		gbe_intf->tx_pipe.flags);
2824
2825	gbe_slave_stop(gbe_intf);
2826
2827	/* disable priority elevation and enable statistics on all ports */
2828	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2829
2830	/* Control register */
2831	val = GBE_CTL_P0_ENABLE;
2832	if (IS_SS_ID_MU(gbe_dev)) {
2833		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2834		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2835	}
2836	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2837
2838	/* All statistics enabled and STAT AB visible by default */
2839	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2840						    stat_port_en));
2841
2842	ret = gbe_slave_open(gbe_intf);
2843	if (ret)
2844		goto fail;
2845
2846	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2847	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2848
2849	slave->open = true;
2850	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2851
2852	gbe_register_cpts(gbe_dev);
2853
2854	return 0;
2855
2856fail:
2857	gbe_slave_stop(gbe_intf);
2858	return ret;
2859}
2860
2861static int gbe_close(void *intf_priv, struct net_device *ndev)
2862{
2863	struct gbe_intf *gbe_intf = intf_priv;
2864	struct netcp_intf *netcp = netdev_priv(ndev);
2865	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2866
2867	gbe_unregister_cpts(gbe_dev);
2868
2869	gbe_slave_stop(gbe_intf);
2870
2871	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2872	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2873
2874	gbe_intf->slave->open = false;
2875	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2876	return 0;
2877}
2878
2879#if IS_ENABLED(CONFIG_TI_CPTS)
2880static void init_slave_ts_ctl(struct gbe_slave *slave)
2881{
2882	slave->ts_ctl.uni = 1;
2883	slave->ts_ctl.dst_port_map =
2884		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2885	slave->ts_ctl.maddr_map =
2886		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2887}
2888
2889#else
2890static void init_slave_ts_ctl(struct gbe_slave *slave)
2891{
2892}
2893#endif /* CONFIG_TI_CPTS */
2894
2895static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2896		      struct device_node *node)
2897{
2898	int port_reg_num;
2899	u32 port_reg_ofs, emac_reg_ofs;
2900	u32 port_reg_blk_sz, emac_reg_blk_sz;
2901
2902	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2903		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2904		return -EINVAL;
2905	}
2906
2907	if (of_property_read_u32(node, "link-interface",
2908				 &slave->link_interface)) {
2909		dev_warn(gbe_dev->dev,
2910			 "missing link-interface value defaulting to 1G mac-phy link\n");
2911		slave->link_interface = SGMII_LINK_MAC_PHY;
2912	}
2913
2914	slave->open = false;
2915	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2916	    (slave->link_interface == XGMII_LINK_MAC_PHY))
2917		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2918	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2919
2920	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2921		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2922	else
2923		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2924
2925	/* Emac regs memmap are contiguous but port regs are not */
2926	port_reg_num = slave->slave_num;
2927	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2928		if (slave->slave_num > 1) {
2929			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2930			port_reg_num -= 2;
2931		} else {
2932			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2933		}
2934		emac_reg_ofs = GBE13_EMAC_OFFSET;
2935		port_reg_blk_sz = 0x30;
2936		emac_reg_blk_sz = 0x40;
2937	} else if (IS_SS_ID_MU(gbe_dev)) {
2938		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2939		emac_reg_ofs = GBENU_EMAC_OFFSET;
2940		port_reg_blk_sz = 0x1000;
2941		emac_reg_blk_sz = 0x1000;
2942	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2943		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2944		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2945		port_reg_blk_sz = 0x30;
2946		emac_reg_blk_sz = 0x40;
2947	} else {
2948		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2949			gbe_dev->ss_version);
2950		return -EINVAL;
2951	}
2952
2953	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2954				(port_reg_blk_sz * port_reg_num);
2955	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2956				(emac_reg_blk_sz * slave->slave_num);
2957
2958	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2959		/* Initialize  slave port register offsets */
2960		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2961		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2962		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2963		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2964		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2965		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2966		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2967		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2968		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2969
2970		/* Initialize EMAC register offsets */
2971		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2972		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2973		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2974
2975	} else if (IS_SS_ID_MU(gbe_dev)) {
2976		/* Initialize  slave port register offsets */
2977		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2978		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2979		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2980		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2981		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2982		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2983		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2984		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2985		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2986		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2987
2988		/* Initialize EMAC register offsets */
2989		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2990		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2991
2992	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2993		/* Initialize  slave port register offsets */
2994		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2995		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2996		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2997		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2998		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2999		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3000		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3001		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3002		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3003
3004		/* Initialize EMAC register offsets */
3005		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3006		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3007		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3008	}
3009
3010	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3011
3012	init_slave_ts_ctl(slave);
3013	return 0;
3014}
3015
3016static void init_secondary_ports(struct gbe_priv *gbe_dev,
3017				 struct device_node *node)
3018{
3019	struct device *dev = gbe_dev->dev;
3020	phy_interface_t phy_mode;
3021	struct gbe_priv **priv;
3022	struct device_node *port;
3023	struct gbe_slave *slave;
3024	bool mac_phy_link = false;
3025
3026	for_each_child_of_node(node, port) {
3027		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3028		if (!slave) {
3029			dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
 
3030				port->name);
3031			continue;
3032		}
3033
3034		if (init_slave(gbe_dev, slave, port)) {
3035			dev_err(dev,
3036				"Failed to initialize secondary port(%s), skipping...\n",
3037				port->name);
3038			devm_kfree(dev, slave);
3039			continue;
3040		}
3041
3042		gbe_sgmii_config(gbe_dev, slave);
3043		gbe_port_reset(slave);
3044		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3045		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3046		gbe_dev->num_slaves++;
3047		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3048		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3049			mac_phy_link = true;
3050
3051		slave->open = true;
3052		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3053			of_node_put(port);
3054			break;
3055		}
3056	}
3057
3058	/* of_phy_connect() is needed only for MAC-PHY interface */
3059	if (!mac_phy_link)
3060		return;
3061
3062	/* Allocate dummy netdev device for attaching to phy device */
3063	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3064					NET_NAME_UNKNOWN, ether_setup);
3065	if (!gbe_dev->dummy_ndev) {
3066		dev_err(dev,
3067			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3068		return;
3069	}
3070	priv = netdev_priv(gbe_dev->dummy_ndev);
3071	*priv = gbe_dev;
3072
3073	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3074		phy_mode = PHY_INTERFACE_MODE_SGMII;
3075		slave->phy_port_t = PORT_MII;
3076	} else {
3077		phy_mode = PHY_INTERFACE_MODE_NA;
3078		slave->phy_port_t = PORT_FIBRE;
3079	}
3080
3081	for_each_sec_slave(slave, gbe_dev) {
3082		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3083		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3084			continue;
3085		slave->phy =
3086			of_phy_connect(gbe_dev->dummy_ndev,
3087				       slave->phy_node,
3088				       gbe_adjust_link_sec_slaves,
3089				       0, phy_mode);
3090		if (!slave->phy) {
3091			dev_err(dev, "phy not found for slave %d\n",
3092				slave->slave_num);
3093			slave->phy = NULL;
3094		} else {
3095			dev_dbg(dev, "phy found: id is: 0x%s\n",
3096				phydev_name(slave->phy));
3097			phy_start(slave->phy);
 
3098		}
3099	}
3100}
3101
3102static void free_secondary_ports(struct gbe_priv *gbe_dev)
3103{
3104	struct gbe_slave *slave;
3105
3106	while (!list_empty(&gbe_dev->secondary_slaves)) {
3107		slave = first_sec_slave(gbe_dev);
3108
3109		if (slave->phy)
3110			phy_disconnect(slave->phy);
3111		list_del(&slave->slave_list);
3112	}
3113	if (gbe_dev->dummy_ndev)
3114		free_netdev(gbe_dev->dummy_ndev);
3115}
3116
3117static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3118				 struct device_node *node)
3119{
3120	struct resource res;
3121	void __iomem *regs;
3122	int ret, i;
3123
3124	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3125	if (ret) {
3126		dev_err(gbe_dev->dev,
3127			"Can't xlate xgbe of node(%s) ss address at %d\n",
3128			node->name, XGBE_SS_REG_INDEX);
3129		return ret;
3130	}
3131
3132	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3133	if (IS_ERR(regs)) {
3134		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3135		return PTR_ERR(regs);
3136	}
3137	gbe_dev->ss_regs = regs;
3138
3139	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3140	if (ret) {
3141		dev_err(gbe_dev->dev,
3142			"Can't xlate xgbe of node(%s) sm address at %d\n",
3143			node->name, XGBE_SM_REG_INDEX);
3144		return ret;
3145	}
3146
3147	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3148	if (IS_ERR(regs)) {
3149		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3150		return PTR_ERR(regs);
3151	}
3152	gbe_dev->switch_regs = regs;
3153
3154	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3155	if (ret) {
3156		dev_err(gbe_dev->dev,
3157			"Can't xlate xgbe serdes of node(%s) address at %d\n",
3158			node->name, XGBE_SERDES_REG_INDEX);
3159		return ret;
3160	}
3161
3162	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3163	if (IS_ERR(regs)) {
3164		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3165		return PTR_ERR(regs);
3166	}
3167	gbe_dev->xgbe_serdes_regs = regs;
3168
3169	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3170	gbe_dev->et_stats = xgbe10_et_stats;
3171	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3172
3173	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3174					 gbe_dev->num_et_stats * sizeof(u64),
3175					 GFP_KERNEL);
3176	if (!gbe_dev->hw_stats) {
3177		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3178		return -ENOMEM;
3179	}
3180
3181	gbe_dev->hw_stats_prev =
3182		devm_kzalloc(gbe_dev->dev,
3183			     gbe_dev->num_et_stats * sizeof(u32),
3184			     GFP_KERNEL);
3185	if (!gbe_dev->hw_stats_prev) {
3186		dev_err(gbe_dev->dev,
3187			"hw_stats_prev memory allocation failed\n");
3188		return -ENOMEM;
3189	}
3190
3191	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3192	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3193					XGBE10_SGMII_MODULE_OFFSET;
3194	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3195
3196	for (i = 0; i < gbe_dev->max_num_ports; i++)
3197		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3198			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3199
3200	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3201	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3202	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3203	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3204	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3205	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3206
3207	/* Subsystem registers */
3208	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3209	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3210
3211	/* Switch module registers */
3212	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3213	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3214	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3215	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3216	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3217
3218	/* Host port registers */
3219	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3220	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3221	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3222	return 0;
3223}
3224
3225static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3226				    struct device_node *node)
3227{
3228	struct resource res;
3229	void __iomem *regs;
3230	int ret;
3231
3232	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3233	if (ret) {
3234		dev_err(gbe_dev->dev,
3235			"Can't translate of node(%s) of gbe ss address at %d\n",
3236			node->name, GBE_SS_REG_INDEX);
3237		return ret;
3238	}
3239
3240	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3241	if (IS_ERR(regs)) {
3242		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3243		return PTR_ERR(regs);
3244	}
3245	gbe_dev->ss_regs = regs;
3246	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3247	return 0;
3248}
3249
3250static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3251				struct device_node *node)
3252{
3253	struct resource res;
3254	void __iomem *regs;
3255	int i, ret;
3256
3257	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3258	if (ret) {
3259		dev_err(gbe_dev->dev,
3260			"Can't translate of gbe node(%s) address at index %d\n",
3261			node->name, GBE_SGMII34_REG_INDEX);
3262		return ret;
3263	}
3264
3265	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3266	if (IS_ERR(regs)) {
3267		dev_err(gbe_dev->dev,
3268			"Failed to map gbe sgmii port34 register base\n");
3269		return PTR_ERR(regs);
3270	}
3271	gbe_dev->sgmii_port34_regs = regs;
3272
3273	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3274	if (ret) {
3275		dev_err(gbe_dev->dev,
3276			"Can't translate of gbe node(%s) address at index %d\n",
3277			node->name, GBE_SM_REG_INDEX);
3278		return ret;
3279	}
3280
3281	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3282	if (IS_ERR(regs)) {
3283		dev_err(gbe_dev->dev,
3284			"Failed to map gbe switch module register base\n");
3285		return PTR_ERR(regs);
3286	}
3287	gbe_dev->switch_regs = regs;
3288
3289	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3290	gbe_dev->et_stats = gbe13_et_stats;
3291	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3292
3293	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3294					 gbe_dev->num_et_stats * sizeof(u64),
3295					 GFP_KERNEL);
3296	if (!gbe_dev->hw_stats) {
3297		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3298		return -ENOMEM;
3299	}
3300
3301	gbe_dev->hw_stats_prev =
3302		devm_kzalloc(gbe_dev->dev,
3303			     gbe_dev->num_et_stats * sizeof(u32),
3304			     GFP_KERNEL);
3305	if (!gbe_dev->hw_stats_prev) {
3306		dev_err(gbe_dev->dev,
3307			"hw_stats_prev memory allocation failed\n");
3308		return -ENOMEM;
3309	}
3310
3311	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3312	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3313
3314	/* K2HK has only 2 hw stats modules visible at a time, so
3315	 * module 0 & 2 points to one base and
3316	 * module 1 & 3 points to the other base
3317	 */
3318	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3319		gbe_dev->hw_stats_regs[i] =
3320			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3321			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3322	}
3323
3324	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3325	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3326	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3327	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3328	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3329	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3330
3331	/* Subsystem registers */
3332	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3333
3334	/* Switch module registers */
3335	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3336	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3337	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3338	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3339	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3340	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3341
3342	/* Host port registers */
3343	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3344	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3345	return 0;
3346}
3347
3348static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3349				struct device_node *node)
3350{
3351	struct resource res;
3352	void __iomem *regs;
3353	int i, ret;
3354
3355	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3356	gbe_dev->et_stats = gbenu_et_stats;
3357
3358	if (IS_SS_ID_NU(gbe_dev))
3359		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3360			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3361	else
3362		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3363					GBENU_ET_STATS_PORT_SIZE;
3364
3365	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3366					 gbe_dev->num_et_stats * sizeof(u64),
3367					 GFP_KERNEL);
3368	if (!gbe_dev->hw_stats) {
3369		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3370		return -ENOMEM;
3371	}
3372
3373	gbe_dev->hw_stats_prev =
3374		devm_kzalloc(gbe_dev->dev,
3375			     gbe_dev->num_et_stats * sizeof(u32),
3376			     GFP_KERNEL);
3377	if (!gbe_dev->hw_stats_prev) {
3378		dev_err(gbe_dev->dev,
3379			"hw_stats_prev memory allocation failed\n");
3380		return -ENOMEM;
3381	}
3382
3383	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3384	if (ret) {
3385		dev_err(gbe_dev->dev,
3386			"Can't translate of gbenu node(%s) addr at index %d\n",
3387			node->name, GBENU_SM_REG_INDEX);
3388		return ret;
3389	}
3390
3391	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3392	if (IS_ERR(regs)) {
3393		dev_err(gbe_dev->dev,
3394			"Failed to map gbenu switch module register base\n");
3395		return PTR_ERR(regs);
3396	}
3397	gbe_dev->switch_regs = regs;
3398
3399	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3400
3401	/* Although sgmii modules are mem mapped to one contiguous
3402	 * region on GBENU devices, setting sgmii_port34_regs allows
3403	 * consistent code when accessing sgmii api
3404	 */
3405	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3406				     (2 * GBENU_SGMII_MODULE_SIZE);
3407
3408	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3409
3410	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3411		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3412			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3413
3414	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3415	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3416	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3417	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
 
3418	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3419
3420	/* Subsystem registers */
3421	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3422
3423	/* Switch module registers */
3424	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3425	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3426	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3427	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3428
3429	/* Host port registers */
3430	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3431	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3432
3433	/* For NU only.  2U does not need tx_pri_map.
3434	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3435	 * while 2U has only 1 such thread
3436	 */
3437	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3438	return 0;
3439}
3440
3441static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3442		     struct device_node *node, void **inst_priv)
3443{
3444	struct device_node *interfaces, *interface;
3445	struct device_node *secondary_ports;
3446	struct cpsw_ale_params ale_params;
3447	struct gbe_priv *gbe_dev;
3448	u32 slave_num;
3449	int i, ret = 0;
3450
3451	if (!node) {
3452		dev_err(dev, "device tree info unavailable\n");
3453		return -ENODEV;
3454	}
3455
3456	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3457	if (!gbe_dev)
3458		return -ENOMEM;
3459
3460	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3461	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3462		gbe_dev->max_num_slaves = 4;
3463	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3464		gbe_dev->max_num_slaves = 8;
3465	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3466		gbe_dev->max_num_slaves = 1;
3467	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3468		gbe_dev->max_num_slaves = 2;
3469	} else {
3470		dev_err(dev, "device tree node for unknown device\n");
3471		return -EINVAL;
3472	}
3473	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3474
3475	gbe_dev->dev = dev;
3476	gbe_dev->netcp_device = netcp_device;
3477	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3478
3479	/* init the hw stats lock */
3480	spin_lock_init(&gbe_dev->hw_stats_lock);
3481
3482	if (of_find_property(node, "enable-ale", NULL)) {
3483		gbe_dev->enable_ale = true;
3484		dev_info(dev, "ALE enabled\n");
3485	} else {
3486		gbe_dev->enable_ale = false;
3487		dev_dbg(dev, "ALE bypass enabled*\n");
3488	}
3489
3490	ret = of_property_read_u32(node, "tx-queue",
3491				   &gbe_dev->tx_queue_id);
3492	if (ret < 0) {
3493		dev_err(dev, "missing tx_queue parameter\n");
3494		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3495	}
3496
3497	ret = of_property_read_string(node, "tx-channel",
3498				      &gbe_dev->dma_chan_name);
3499	if (ret < 0) {
3500		dev_err(dev, "missing \"tx-channel\" parameter\n");
3501		return -EINVAL;
3502	}
3503
3504	if (!strcmp(node->name, "gbe")) {
3505		ret = get_gbe_resource_version(gbe_dev, node);
3506		if (ret)
3507			return ret;
3508
3509		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3510
3511		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3512			ret = set_gbe_ethss14_priv(gbe_dev, node);
3513		else if (IS_SS_ID_MU(gbe_dev))
3514			ret = set_gbenu_ethss_priv(gbe_dev, node);
3515		else
3516			ret = -ENODEV;
3517
3518	} else if (!strcmp(node->name, "xgbe")) {
3519		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3520		if (ret)
3521			return ret;
3522		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3523					     gbe_dev->ss_regs);
3524	} else {
3525		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3526		ret = -ENODEV;
3527	}
3528
3529	if (ret)
3530		return ret;
3531
3532	interfaces = of_get_child_by_name(node, "interfaces");
3533	if (!interfaces)
3534		dev_err(dev, "could not find interfaces\n");
3535
3536	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3537				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3538	if (ret)
3539		return ret;
3540
3541	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3542	if (ret)
3543		return ret;
3544
3545	/* Create network interfaces */
3546	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3547	for_each_child_of_node(interfaces, interface) {
3548		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3549		if (ret) {
3550			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3551				interface->name);
3552			continue;
3553		}
3554		gbe_dev->num_slaves++;
3555		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3556			of_node_put(interface);
3557			break;
3558		}
3559	}
3560	of_node_put(interfaces);
3561
3562	if (!gbe_dev->num_slaves)
3563		dev_warn(dev, "No network interface configured\n");
3564
3565	/* Initialize Secondary slave ports */
3566	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3567	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3568	if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3569		init_secondary_ports(gbe_dev, secondary_ports);
3570	of_node_put(secondary_ports);
3571
3572	if (!gbe_dev->num_slaves) {
3573		dev_err(dev,
3574			"No network interface or secondary ports configured\n");
3575		ret = -ENODEV;
3576		goto free_sec_ports;
3577	}
3578
3579	memset(&ale_params, 0, sizeof(ale_params));
3580	ale_params.dev		= gbe_dev->dev;
3581	ale_params.ale_regs	= gbe_dev->ale_reg;
3582	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3583	ale_params.ale_entries	= gbe_dev->ale_entries;
3584	ale_params.ale_ports	= gbe_dev->ale_ports;
3585	if (IS_SS_ID_MU(gbe_dev)) {
3586		ale_params.major_ver_mask = 0x7;
3587		ale_params.nu_switch_ale = true;
3588	}
3589	gbe_dev->ale = cpsw_ale_create(&ale_params);
3590	if (!gbe_dev->ale) {
3591		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3592		ret = -ENODEV;
3593		goto free_sec_ports;
3594	} else {
3595		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3596	}
3597
3598	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
3599	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3600		ret = PTR_ERR(gbe_dev->cpts);
3601		goto free_sec_ports;
3602	}
3603
3604	/* initialize host port */
3605	gbe_init_host_port(gbe_dev);
3606
3607	spin_lock_bh(&gbe_dev->hw_stats_lock);
3608	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3609		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3610			gbe_reset_mod_stats_ver14(gbe_dev, i);
3611		else
3612			gbe_reset_mod_stats(gbe_dev, i);
3613	}
3614	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3615
3616	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
 
 
3617	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3618	add_timer(&gbe_dev->timer);
3619	*inst_priv = gbe_dev;
3620	return 0;
3621
3622free_sec_ports:
3623	free_secondary_ports(gbe_dev);
3624	return ret;
3625}
3626
3627static int gbe_attach(void *inst_priv, struct net_device *ndev,
3628		      struct device_node *node, void **intf_priv)
3629{
3630	struct gbe_priv *gbe_dev = inst_priv;
3631	struct gbe_intf *gbe_intf;
3632	int ret;
3633
3634	if (!node) {
3635		dev_err(gbe_dev->dev, "interface node not available\n");
3636		return -ENODEV;
3637	}
3638
3639	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3640	if (!gbe_intf)
3641		return -ENOMEM;
3642
3643	gbe_intf->ndev = ndev;
3644	gbe_intf->dev = gbe_dev->dev;
3645	gbe_intf->gbe_dev = gbe_dev;
3646
3647	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3648					sizeof(*gbe_intf->slave),
3649					GFP_KERNEL);
3650	if (!gbe_intf->slave) {
3651		ret = -ENOMEM;
3652		goto fail;
3653	}
3654
3655	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3656		ret = -ENODEV;
3657		goto fail;
3658	}
3659
3660	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3661	ndev->ethtool_ops = &keystone_ethtool_ops;
3662	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3663	*intf_priv = gbe_intf;
3664	return 0;
3665
3666fail:
3667	if (gbe_intf->slave)
3668		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3669	if (gbe_intf)
3670		devm_kfree(gbe_dev->dev, gbe_intf);
3671	return ret;
3672}
3673
3674static int gbe_release(void *intf_priv)
3675{
3676	struct gbe_intf *gbe_intf = intf_priv;
3677
3678	gbe_intf->ndev->ethtool_ops = NULL;
3679	list_del(&gbe_intf->gbe_intf_list);
3680	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3681	devm_kfree(gbe_intf->dev, gbe_intf);
3682	return 0;
3683}
3684
3685static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3686{
3687	struct gbe_priv *gbe_dev = inst_priv;
3688
3689	del_timer_sync(&gbe_dev->timer);
3690	cpts_release(gbe_dev->cpts);
3691	cpsw_ale_stop(gbe_dev->ale);
 
3692	netcp_txpipe_close(&gbe_dev->tx_pipe);
3693	free_secondary_ports(gbe_dev);
3694
3695	if (!list_empty(&gbe_dev->gbe_intf_head))
3696		dev_alert(gbe_dev->dev,
3697			  "unreleased ethss interfaces present\n");
3698
3699	return 0;
3700}
3701
3702static struct netcp_module gbe_module = {
3703	.name		= GBE_MODULE_NAME,
3704	.owner		= THIS_MODULE,
3705	.primary	= true,
3706	.probe		= gbe_probe,
3707	.open		= gbe_open,
3708	.close		= gbe_close,
3709	.remove		= gbe_remove,
3710	.attach		= gbe_attach,
3711	.release	= gbe_release,
3712	.add_addr	= gbe_add_addr,
3713	.del_addr	= gbe_del_addr,
3714	.add_vid	= gbe_add_vid,
3715	.del_vid	= gbe_del_vid,
3716	.ioctl		= gbe_ioctl,
3717};
3718
3719static struct netcp_module xgbe_module = {
3720	.name		= XGBE_MODULE_NAME,
3721	.owner		= THIS_MODULE,
3722	.primary	= true,
3723	.probe		= gbe_probe,
3724	.open		= gbe_open,
3725	.close		= gbe_close,
3726	.remove		= gbe_remove,
3727	.attach		= gbe_attach,
3728	.release	= gbe_release,
3729	.add_addr	= gbe_add_addr,
3730	.del_addr	= gbe_del_addr,
3731	.add_vid	= gbe_add_vid,
3732	.del_vid	= gbe_del_vid,
3733	.ioctl		= gbe_ioctl,
3734};
3735
3736static int __init keystone_gbe_init(void)
3737{
3738	int ret;
3739
3740	ret = netcp_register_module(&gbe_module);
3741	if (ret)
3742		return ret;
3743
3744	ret = netcp_register_module(&xgbe_module);
3745	if (ret)
3746		return ret;
3747
3748	return 0;
3749}
3750module_init(keystone_gbe_init);
3751
3752static void __exit keystone_gbe_exit(void)
3753{
3754	netcp_unregister_module(&gbe_module);
3755	netcp_unregister_module(&xgbe_module);
3756}
3757module_exit(keystone_gbe_exit);
3758
3759MODULE_LICENSE("GPL v2");
3760MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3761MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
v4.6
   1/*
   2 * Keystone GBE and XGBE subsystem code
   3 *
   4 * Copyright (C) 2014 Texas Instruments Incorporated
   5 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   6 *		Sandeep Paulraj <s-paulraj@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *		Wingman Kwok <w-kwok2@ti.com>
  10 *
  11 * This program is free software; you can redistribute it and/or
  12 * modify it under the terms of the GNU General Public License as
  13 * published by the Free Software Foundation version 2.
  14 *
  15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  16 * kind, whether express or implied; without even the implied warranty
  17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 */
  20
  21#include <linux/io.h>
  22#include <linux/module.h>
  23#include <linux/of_mdio.h>
  24#include <linux/of_address.h>
  25#include <linux/if_vlan.h>
 
 
  26#include <linux/ethtool.h>
  27
 
  28#include "cpsw_ale.h"
  29#include "netcp.h"
 
  30
  31#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
  32#define NETCP_DRIVER_VERSION		"v1.0"
  33
  34#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
  35#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
  36#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
  37#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
  38
  39/* 1G Ethernet SS defines */
  40#define GBE_MODULE_NAME			"netcp-gbe"
  41#define GBE_SS_VERSION_14		0x4ed21104
  42
  43#define GBE_SS_REG_INDEX		0
  44#define GBE_SGMII34_REG_INDEX		1
  45#define GBE_SM_REG_INDEX		2
  46/* offset relative to base of GBE_SS_REG_INDEX */
  47#define GBE13_SGMII_MODULE_OFFSET	0x100
  48/* offset relative to base of GBE_SM_REG_INDEX */
  49#define GBE13_HOST_PORT_OFFSET		0x34
  50#define GBE13_SLAVE_PORT_OFFSET		0x60
  51#define GBE13_EMAC_OFFSET		0x100
  52#define GBE13_SLAVE_PORT2_OFFSET	0x200
  53#define GBE13_HW_STATS_OFFSET		0x300
 
  54#define GBE13_ALE_OFFSET		0x600
  55#define GBE13_HOST_PORT_NUM		0
  56#define GBE13_NUM_ALE_ENTRIES		1024
  57
  58/* 1G Ethernet NU SS defines */
  59#define GBENU_MODULE_NAME		"netcp-gbenu"
  60#define GBE_SS_ID_NU			0x4ee6
  61#define GBE_SS_ID_2U			0x4ee8
  62
  63#define IS_SS_ID_MU(d) \
  64	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
  65	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
  66
  67#define IS_SS_ID_NU(d) \
  68	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
  69
  70#define GBENU_SS_REG_INDEX		0
  71#define GBENU_SM_REG_INDEX		1
  72#define GBENU_SGMII_MODULE_OFFSET	0x100
  73#define GBENU_HOST_PORT_OFFSET		0x1000
  74#define GBENU_SLAVE_PORT_OFFSET		0x2000
  75#define GBENU_EMAC_OFFSET		0x2330
  76#define GBENU_HW_STATS_OFFSET		0x1a000
 
  77#define GBENU_ALE_OFFSET		0x1e000
  78#define GBENU_HOST_PORT_NUM		0
  79#define GBENU_NUM_ALE_ENTRIES		1024
  80#define GBENU_SGMII_MODULE_SIZE		0x100
  81
  82/* 10G Ethernet SS defines */
  83#define XGBE_MODULE_NAME		"netcp-xgbe"
  84#define XGBE_SS_VERSION_10		0x4ee42100
  85
  86#define XGBE_SS_REG_INDEX		0
  87#define XGBE_SM_REG_INDEX		1
  88#define XGBE_SERDES_REG_INDEX		2
  89
  90/* offset relative to base of XGBE_SS_REG_INDEX */
  91#define XGBE10_SGMII_MODULE_OFFSET	0x100
 
  92/* offset relative to base of XGBE_SM_REG_INDEX */
  93#define XGBE10_HOST_PORT_OFFSET		0x34
  94#define XGBE10_SLAVE_PORT_OFFSET	0x64
  95#define XGBE10_EMAC_OFFSET		0x400
 
  96#define XGBE10_ALE_OFFSET		0x700
  97#define XGBE10_HW_STATS_OFFSET		0x800
  98#define XGBE10_HOST_PORT_NUM		0
  99#define XGBE10_NUM_ALE_ENTRIES		1024
 100
 101#define	GBE_TIMER_INTERVAL			(HZ / 2)
 102
 103/* Soft reset register values */
 104#define SOFT_RESET_MASK				BIT(0)
 105#define SOFT_RESET				BIT(0)
 106#define DEVICE_EMACSL_RESET_POLL_COUNT		100
 107#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
 108
 109#define MACSL_RX_ENABLE_CSF			BIT(23)
 110#define MACSL_ENABLE_EXT_CTL			BIT(18)
 111#define MACSL_XGMII_ENABLE			BIT(13)
 112#define MACSL_XGIG_MODE				BIT(8)
 113#define MACSL_GIG_MODE				BIT(7)
 114#define MACSL_GMII_ENABLE			BIT(5)
 115#define MACSL_FULLDUPLEX			BIT(0)
 116
 117#define GBE_CTL_P0_ENABLE			BIT(2)
 
 118#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
 119#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
 120#define GBE_STATS_CD_SEL			BIT(28)
 121
 122#define GBE_PORT_MASK(x)			(BIT(x) - 1)
 123#define GBE_MASK_NO_PORTS			0
 124
 125#define GBE_DEF_1G_MAC_CONTROL					\
 126		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
 127		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 128
 129#define GBE_DEF_10G_MAC_CONTROL				\
 130		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
 131		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
 132
 133#define GBE_STATSA_MODULE			0
 134#define GBE_STATSB_MODULE			1
 135#define GBE_STATSC_MODULE			2
 136#define GBE_STATSD_MODULE			3
 137
 138#define GBENU_STATS0_MODULE			0
 139#define GBENU_STATS1_MODULE			1
 140#define GBENU_STATS2_MODULE			2
 141#define GBENU_STATS3_MODULE			3
 142#define GBENU_STATS4_MODULE			4
 143#define GBENU_STATS5_MODULE			5
 144#define GBENU_STATS6_MODULE			6
 145#define GBENU_STATS7_MODULE			7
 146#define GBENU_STATS8_MODULE			8
 147
 148#define XGBE_STATS0_MODULE			0
 149#define XGBE_STATS1_MODULE			1
 150#define XGBE_STATS2_MODULE			2
 151
 152/* s: 0-based slave_port */
 153#define SGMII_BASE(d, s) \
 154	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
 155
 156#define GBE_TX_QUEUE				648
 157#define	GBE_TXHOOK_ORDER			0
 
 158#define GBE_DEFAULT_ALE_AGEOUT			30
 159#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
 160#define NETCP_LINK_STATE_INVALID		-1
 161
 162#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 163		offsetof(struct gbe##_##rb, rn)
 164#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 165		offsetof(struct gbenu##_##rb, rn)
 166#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
 167		offsetof(struct xgbe##_##rb, rn)
 168#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
 169
 170#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
 171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 172struct xgbe_ss_regs {
 173	u32	id_ver;
 174	u32	synce_count;
 175	u32	synce_mux;
 176	u32	control;
 177};
 178
 179struct xgbe_switch_regs {
 180	u32	id_ver;
 181	u32	control;
 182	u32	emcontrol;
 183	u32	stat_port_en;
 184	u32	ptype;
 185	u32	soft_idle;
 186	u32	thru_rate;
 187	u32	gap_thresh;
 188	u32	tx_start_wds;
 189	u32	flow_control;
 190	u32	cppi_thresh;
 191};
 192
 193struct xgbe_port_regs {
 194	u32	blk_cnt;
 195	u32	port_vlan;
 196	u32	tx_pri_map;
 197	u32	sa_lo;
 198	u32	sa_hi;
 199	u32	ts_ctl;
 200	u32	ts_seq_ltype;
 201	u32	ts_vlan;
 202	u32	ts_ctl_ltype2;
 203	u32	ts_ctl2;
 204	u32	control;
 205};
 206
 207struct xgbe_host_port_regs {
 208	u32	blk_cnt;
 209	u32	port_vlan;
 210	u32	tx_pri_map;
 211	u32	src_id;
 212	u32	rx_pri_map;
 213	u32	rx_maxlen;
 214};
 215
 216struct xgbe_emac_regs {
 217	u32	id_ver;
 218	u32	mac_control;
 219	u32	mac_status;
 220	u32	soft_reset;
 221	u32	rx_maxlen;
 222	u32	__reserved_0;
 223	u32	rx_pause;
 224	u32	tx_pause;
 225	u32	em_control;
 226	u32	__reserved_1;
 227	u32	tx_gap;
 228	u32	rsvd[4];
 229};
 230
 231struct xgbe_host_hw_stats {
 232	u32	rx_good_frames;
 233	u32	rx_broadcast_frames;
 234	u32	rx_multicast_frames;
 235	u32	__rsvd_0[3];
 236	u32	rx_oversized_frames;
 237	u32	__rsvd_1;
 238	u32	rx_undersized_frames;
 239	u32	__rsvd_2;
 240	u32	overrun_type4;
 241	u32	overrun_type5;
 242	u32	rx_bytes;
 243	u32	tx_good_frames;
 244	u32	tx_broadcast_frames;
 245	u32	tx_multicast_frames;
 246	u32	__rsvd_3[9];
 247	u32	tx_bytes;
 248	u32	tx_64byte_frames;
 249	u32	tx_65_to_127byte_frames;
 250	u32	tx_128_to_255byte_frames;
 251	u32	tx_256_to_511byte_frames;
 252	u32	tx_512_to_1023byte_frames;
 253	u32	tx_1024byte_frames;
 254	u32	net_bytes;
 255	u32	rx_sof_overruns;
 256	u32	rx_mof_overruns;
 257	u32	rx_dma_overruns;
 258};
 259
 260struct xgbe_hw_stats {
 261	u32	rx_good_frames;
 262	u32	rx_broadcast_frames;
 263	u32	rx_multicast_frames;
 264	u32	rx_pause_frames;
 265	u32	rx_crc_errors;
 266	u32	rx_align_code_errors;
 267	u32	rx_oversized_frames;
 268	u32	rx_jabber_frames;
 269	u32	rx_undersized_frames;
 270	u32	rx_fragments;
 271	u32	overrun_type4;
 272	u32	overrun_type5;
 273	u32	rx_bytes;
 274	u32	tx_good_frames;
 275	u32	tx_broadcast_frames;
 276	u32	tx_multicast_frames;
 277	u32	tx_pause_frames;
 278	u32	tx_deferred_frames;
 279	u32	tx_collision_frames;
 280	u32	tx_single_coll_frames;
 281	u32	tx_mult_coll_frames;
 282	u32	tx_excessive_collisions;
 283	u32	tx_late_collisions;
 284	u32	tx_underrun;
 285	u32	tx_carrier_sense_errors;
 286	u32	tx_bytes;
 287	u32	tx_64byte_frames;
 288	u32	tx_65_to_127byte_frames;
 289	u32	tx_128_to_255byte_frames;
 290	u32	tx_256_to_511byte_frames;
 291	u32	tx_512_to_1023byte_frames;
 292	u32	tx_1024byte_frames;
 293	u32	net_bytes;
 294	u32	rx_sof_overruns;
 295	u32	rx_mof_overruns;
 296	u32	rx_dma_overruns;
 297};
 298
 299struct gbenu_ss_regs {
 300	u32	id_ver;
 301	u32	synce_count;		/* NU */
 302	u32	synce_mux;		/* NU */
 303	u32	control;		/* 2U */
 304	u32	__rsvd_0[2];		/* 2U */
 305	u32	rgmii_status;		/* 2U */
 306	u32	ss_status;		/* 2U */
 307};
 308
 309struct gbenu_switch_regs {
 310	u32	id_ver;
 311	u32	control;
 312	u32	__rsvd_0[2];
 313	u32	emcontrol;
 314	u32	stat_port_en;
 315	u32	ptype;			/* NU */
 316	u32	soft_idle;
 317	u32	thru_rate;		/* NU */
 318	u32	gap_thresh;		/* NU */
 319	u32	tx_start_wds;		/* NU */
 320	u32	eee_prescale;		/* 2U */
 321	u32	tx_g_oflow_thresh_set;	/* NU */
 322	u32	tx_g_oflow_thresh_clr;	/* NU */
 323	u32	tx_g_buf_thresh_set_l;	/* NU */
 324	u32	tx_g_buf_thresh_set_h;	/* NU */
 325	u32	tx_g_buf_thresh_clr_l;	/* NU */
 326	u32	tx_g_buf_thresh_clr_h;	/* NU */
 327};
 328
 329struct gbenu_port_regs {
 330	u32	__rsvd_0;
 331	u32	control;
 332	u32	max_blks;		/* 2U */
 333	u32	mem_align1;
 334	u32	blk_cnt;
 335	u32	port_vlan;
 336	u32	tx_pri_map;		/* NU */
 337	u32	pri_ctl;		/* 2U */
 338	u32	rx_pri_map;
 339	u32	rx_maxlen;
 340	u32	tx_blks_pri;		/* NU */
 341	u32	__rsvd_1;
 342	u32	idle2lpi;		/* 2U */
 343	u32	lpi2idle;		/* 2U */
 344	u32	eee_status;		/* 2U */
 345	u32	__rsvd_2;
 346	u32	__rsvd_3[176];		/* NU: more to add */
 347	u32	__rsvd_4[2];
 348	u32	sa_lo;
 349	u32	sa_hi;
 350	u32	ts_ctl;
 351	u32	ts_seq_ltype;
 352	u32	ts_vlan;
 353	u32	ts_ctl_ltype2;
 354	u32	ts_ctl2;
 355};
 356
 357struct gbenu_host_port_regs {
 358	u32	__rsvd_0;
 359	u32	control;
 360	u32	flow_id_offset;		/* 2U */
 361	u32	__rsvd_1;
 362	u32	blk_cnt;
 363	u32	port_vlan;
 364	u32	tx_pri_map;		/* NU */
 365	u32	pri_ctl;
 366	u32	rx_pri_map;
 367	u32	rx_maxlen;
 368	u32	tx_blks_pri;		/* NU */
 369	u32	__rsvd_2;
 370	u32	idle2lpi;		/* 2U */
 371	u32	lpi2wake;		/* 2U */
 372	u32	eee_status;		/* 2U */
 373	u32	__rsvd_3;
 374	u32	__rsvd_4[184];		/* NU */
 375	u32	host_blks_pri;		/* NU */
 376};
 377
 378struct gbenu_emac_regs {
 379	u32	mac_control;
 380	u32	mac_status;
 381	u32	soft_reset;
 382	u32	boff_test;
 383	u32	rx_pause;
 384	u32	__rsvd_0[11];		/* NU */
 385	u32	tx_pause;
 386	u32	__rsvd_1[11];		/* NU */
 387	u32	em_control;
 388	u32	tx_gap;
 389};
 390
 391/* Some hw stat regs are applicable to slave port only.
 392 * This is handled by gbenu_et_stats struct.  Also some
 393 * are for SS version NU and some are for 2U.
 394 */
 395struct gbenu_hw_stats {
 396	u32	rx_good_frames;
 397	u32	rx_broadcast_frames;
 398	u32	rx_multicast_frames;
 399	u32	rx_pause_frames;		/* slave */
 400	u32	rx_crc_errors;
 401	u32	rx_align_code_errors;		/* slave */
 402	u32	rx_oversized_frames;
 403	u32	rx_jabber_frames;		/* slave */
 404	u32	rx_undersized_frames;
 405	u32	rx_fragments;			/* slave */
 406	u32	ale_drop;
 407	u32	ale_overrun_drop;
 408	u32	rx_bytes;
 409	u32	tx_good_frames;
 410	u32	tx_broadcast_frames;
 411	u32	tx_multicast_frames;
 412	u32	tx_pause_frames;		/* slave */
 413	u32	tx_deferred_frames;		/* slave */
 414	u32	tx_collision_frames;		/* slave */
 415	u32	tx_single_coll_frames;		/* slave */
 416	u32	tx_mult_coll_frames;		/* slave */
 417	u32	tx_excessive_collisions;	/* slave */
 418	u32	tx_late_collisions;		/* slave */
 419	u32	rx_ipg_error;			/* slave 10G only */
 420	u32	tx_carrier_sense_errors;	/* slave */
 421	u32	tx_bytes;
 422	u32	tx_64B_frames;
 423	u32	tx_65_to_127B_frames;
 424	u32	tx_128_to_255B_frames;
 425	u32	tx_256_to_511B_frames;
 426	u32	tx_512_to_1023B_frames;
 427	u32	tx_1024B_frames;
 428	u32	net_bytes;
 429	u32	rx_bottom_fifo_drop;
 430	u32	rx_port_mask_drop;
 431	u32	rx_top_fifo_drop;
 432	u32	ale_rate_limit_drop;
 433	u32	ale_vid_ingress_drop;
 434	u32	ale_da_eq_sa_drop;
 435	u32	__rsvd_0[3];
 436	u32	ale_unknown_ucast;
 437	u32	ale_unknown_ucast_bytes;
 438	u32	ale_unknown_mcast;
 439	u32	ale_unknown_mcast_bytes;
 440	u32	ale_unknown_bcast;
 441	u32	ale_unknown_bcast_bytes;
 442	u32	ale_pol_match;
 443	u32	ale_pol_match_red;		/* NU */
 444	u32	ale_pol_match_yellow;		/* NU */
 445	u32	__rsvd_1[44];
 446	u32	tx_mem_protect_err;
 447	/* following NU only */
 448	u32	tx_pri0;
 449	u32	tx_pri1;
 450	u32	tx_pri2;
 451	u32	tx_pri3;
 452	u32	tx_pri4;
 453	u32	tx_pri5;
 454	u32	tx_pri6;
 455	u32	tx_pri7;
 456	u32	tx_pri0_bcnt;
 457	u32	tx_pri1_bcnt;
 458	u32	tx_pri2_bcnt;
 459	u32	tx_pri3_bcnt;
 460	u32	tx_pri4_bcnt;
 461	u32	tx_pri5_bcnt;
 462	u32	tx_pri6_bcnt;
 463	u32	tx_pri7_bcnt;
 464	u32	tx_pri0_drop;
 465	u32	tx_pri1_drop;
 466	u32	tx_pri2_drop;
 467	u32	tx_pri3_drop;
 468	u32	tx_pri4_drop;
 469	u32	tx_pri5_drop;
 470	u32	tx_pri6_drop;
 471	u32	tx_pri7_drop;
 472	u32	tx_pri0_drop_bcnt;
 473	u32	tx_pri1_drop_bcnt;
 474	u32	tx_pri2_drop_bcnt;
 475	u32	tx_pri3_drop_bcnt;
 476	u32	tx_pri4_drop_bcnt;
 477	u32	tx_pri5_drop_bcnt;
 478	u32	tx_pri6_drop_bcnt;
 479	u32	tx_pri7_drop_bcnt;
 480};
 481
 482#define GBENU_HW_STATS_REG_MAP_SZ	0x200
 483
 484struct gbe_ss_regs {
 485	u32	id_ver;
 486	u32	synce_count;
 487	u32	synce_mux;
 488};
 489
 490struct gbe_ss_regs_ofs {
 491	u16	id_ver;
 492	u16	control;
 493};
 494
 495struct gbe_switch_regs {
 496	u32	id_ver;
 497	u32	control;
 498	u32	soft_reset;
 499	u32	stat_port_en;
 500	u32	ptype;
 501	u32	soft_idle;
 502	u32	thru_rate;
 503	u32	gap_thresh;
 504	u32	tx_start_wds;
 505	u32	flow_control;
 506};
 507
 508struct gbe_switch_regs_ofs {
 509	u16	id_ver;
 510	u16	control;
 511	u16	soft_reset;
 512	u16	emcontrol;
 513	u16	stat_port_en;
 514	u16	ptype;
 515	u16	flow_control;
 516};
 517
 518struct gbe_port_regs {
 519	u32	max_blks;
 520	u32	blk_cnt;
 521	u32	port_vlan;
 522	u32	tx_pri_map;
 523	u32	sa_lo;
 524	u32	sa_hi;
 525	u32	ts_ctl;
 526	u32	ts_seq_ltype;
 527	u32	ts_vlan;
 528	u32	ts_ctl_ltype2;
 529	u32	ts_ctl2;
 530};
 531
 532struct gbe_port_regs_ofs {
 533	u16	port_vlan;
 534	u16	tx_pri_map;
 535	u16	sa_lo;
 536	u16	sa_hi;
 537	u16	ts_ctl;
 538	u16	ts_seq_ltype;
 539	u16	ts_vlan;
 540	u16	ts_ctl_ltype2;
 541	u16	ts_ctl2;
 542	u16	rx_maxlen;	/* 2U, NU */
 543};
 544
 545struct gbe_host_port_regs {
 546	u32	src_id;
 547	u32	port_vlan;
 548	u32	rx_pri_map;
 549	u32	rx_maxlen;
 550};
 551
 552struct gbe_host_port_regs_ofs {
 553	u16	port_vlan;
 554	u16	tx_pri_map;
 555	u16	rx_maxlen;
 556};
 557
 558struct gbe_emac_regs {
 559	u32	id_ver;
 560	u32	mac_control;
 561	u32	mac_status;
 562	u32	soft_reset;
 563	u32	rx_maxlen;
 564	u32	__reserved_0;
 565	u32	rx_pause;
 566	u32	tx_pause;
 567	u32	__reserved_1;
 568	u32	rx_pri_map;
 569	u32	rsvd[6];
 570};
 571
 572struct gbe_emac_regs_ofs {
 573	u16	mac_control;
 574	u16	soft_reset;
 575	u16	rx_maxlen;
 576};
 577
 578struct gbe_hw_stats {
 579	u32	rx_good_frames;
 580	u32	rx_broadcast_frames;
 581	u32	rx_multicast_frames;
 582	u32	rx_pause_frames;
 583	u32	rx_crc_errors;
 584	u32	rx_align_code_errors;
 585	u32	rx_oversized_frames;
 586	u32	rx_jabber_frames;
 587	u32	rx_undersized_frames;
 588	u32	rx_fragments;
 589	u32	__pad_0[2];
 590	u32	rx_bytes;
 591	u32	tx_good_frames;
 592	u32	tx_broadcast_frames;
 593	u32	tx_multicast_frames;
 594	u32	tx_pause_frames;
 595	u32	tx_deferred_frames;
 596	u32	tx_collision_frames;
 597	u32	tx_single_coll_frames;
 598	u32	tx_mult_coll_frames;
 599	u32	tx_excessive_collisions;
 600	u32	tx_late_collisions;
 601	u32	tx_underrun;
 602	u32	tx_carrier_sense_errors;
 603	u32	tx_bytes;
 604	u32	tx_64byte_frames;
 605	u32	tx_65_to_127byte_frames;
 606	u32	tx_128_to_255byte_frames;
 607	u32	tx_256_to_511byte_frames;
 608	u32	tx_512_to_1023byte_frames;
 609	u32	tx_1024byte_frames;
 610	u32	net_bytes;
 611	u32	rx_sof_overruns;
 612	u32	rx_mof_overruns;
 613	u32	rx_dma_overruns;
 614};
 615
 616#define GBE_MAX_HW_STAT_MODS			9
 617#define GBE_HW_STATS_REG_MAP_SZ			0x100
 618
 
 
 
 
 
 
 
 619struct gbe_slave {
 620	void __iomem			*port_regs;
 621	void __iomem			*emac_regs;
 622	struct gbe_port_regs_ofs	port_regs_ofs;
 623	struct gbe_emac_regs_ofs	emac_regs_ofs;
 624	int				slave_num; /* 0 based logical number */
 625	int				port_num;  /* actual port number */
 626	atomic_t			link_state;
 627	bool				open;
 628	struct phy_device		*phy;
 629	u32				link_interface;
 630	u32				mac_control;
 631	u8				phy_port_t;
 632	struct device_node		*phy_node;
 
 633	struct list_head		slave_list;
 634};
 635
 636struct gbe_priv {
 637	struct device			*dev;
 638	struct netcp_device		*netcp_device;
 639	struct timer_list		timer;
 640	u32				num_slaves;
 641	u32				ale_entries;
 642	u32				ale_ports;
 643	bool				enable_ale;
 644	u8				max_num_slaves;
 645	u8				max_num_ports; /* max_num_slaves + 1 */
 646	u8				num_stats_mods;
 647	struct netcp_tx_pipe		tx_pipe;
 648
 649	int				host_port;
 650	u32				rx_packet_max;
 651	u32				ss_version;
 652	u32				stats_en_mask;
 653
 654	void __iomem			*ss_regs;
 655	void __iomem			*switch_regs;
 656	void __iomem			*host_port_regs;
 657	void __iomem			*ale_reg;
 
 658	void __iomem			*sgmii_port_regs;
 659	void __iomem			*sgmii_port34_regs;
 660	void __iomem			*xgbe_serdes_regs;
 661	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
 662
 663	struct gbe_ss_regs_ofs		ss_regs_ofs;
 664	struct gbe_switch_regs_ofs	switch_regs_ofs;
 665	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
 666
 667	struct cpsw_ale			*ale;
 668	unsigned int			tx_queue_id;
 669	const char			*dma_chan_name;
 670
 671	struct list_head		gbe_intf_head;
 672	struct list_head		secondary_slaves;
 673	struct net_device		*dummy_ndev;
 674
 675	u64				*hw_stats;
 676	u32				*hw_stats_prev;
 677	const struct netcp_ethtool_stat *et_stats;
 678	int				num_et_stats;
 679	/*  Lock for updating the hwstats */
 680	spinlock_t			hw_stats_lock;
 
 
 
 681};
 682
 683struct gbe_intf {
 684	struct net_device	*ndev;
 685	struct device		*dev;
 686	struct gbe_priv		*gbe_dev;
 687	struct netcp_tx_pipe	tx_pipe;
 688	struct gbe_slave	*slave;
 689	struct list_head	gbe_intf_list;
 690	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 691};
 692
 693static struct netcp_module gbe_module;
 694static struct netcp_module xgbe_module;
 695
 696/* Statistic management */
 697struct netcp_ethtool_stat {
 698	char desc[ETH_GSTRING_LEN];
 699	int type;
 700	u32 size;
 701	int offset;
 702};
 703
 704#define GBE_STATSA_INFO(field)						\
 705{									\
 706	"GBE_A:"#field, GBE_STATSA_MODULE,				\
 707	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 708	offsetof(struct gbe_hw_stats, field)				\
 709}
 710
 711#define GBE_STATSB_INFO(field)						\
 712{									\
 713	"GBE_B:"#field, GBE_STATSB_MODULE,				\
 714	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 715	offsetof(struct gbe_hw_stats, field)				\
 716}
 717
 718#define GBE_STATSC_INFO(field)						\
 719{									\
 720	"GBE_C:"#field, GBE_STATSC_MODULE,				\
 721	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 722	offsetof(struct gbe_hw_stats, field)				\
 723}
 724
 725#define GBE_STATSD_INFO(field)						\
 726{									\
 727	"GBE_D:"#field, GBE_STATSD_MODULE,				\
 728	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
 729	offsetof(struct gbe_hw_stats, field)				\
 730}
 731
 732static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 733	/* GBE module A */
 734	GBE_STATSA_INFO(rx_good_frames),
 735	GBE_STATSA_INFO(rx_broadcast_frames),
 736	GBE_STATSA_INFO(rx_multicast_frames),
 737	GBE_STATSA_INFO(rx_pause_frames),
 738	GBE_STATSA_INFO(rx_crc_errors),
 739	GBE_STATSA_INFO(rx_align_code_errors),
 740	GBE_STATSA_INFO(rx_oversized_frames),
 741	GBE_STATSA_INFO(rx_jabber_frames),
 742	GBE_STATSA_INFO(rx_undersized_frames),
 743	GBE_STATSA_INFO(rx_fragments),
 744	GBE_STATSA_INFO(rx_bytes),
 745	GBE_STATSA_INFO(tx_good_frames),
 746	GBE_STATSA_INFO(tx_broadcast_frames),
 747	GBE_STATSA_INFO(tx_multicast_frames),
 748	GBE_STATSA_INFO(tx_pause_frames),
 749	GBE_STATSA_INFO(tx_deferred_frames),
 750	GBE_STATSA_INFO(tx_collision_frames),
 751	GBE_STATSA_INFO(tx_single_coll_frames),
 752	GBE_STATSA_INFO(tx_mult_coll_frames),
 753	GBE_STATSA_INFO(tx_excessive_collisions),
 754	GBE_STATSA_INFO(tx_late_collisions),
 755	GBE_STATSA_INFO(tx_underrun),
 756	GBE_STATSA_INFO(tx_carrier_sense_errors),
 757	GBE_STATSA_INFO(tx_bytes),
 758	GBE_STATSA_INFO(tx_64byte_frames),
 759	GBE_STATSA_INFO(tx_65_to_127byte_frames),
 760	GBE_STATSA_INFO(tx_128_to_255byte_frames),
 761	GBE_STATSA_INFO(tx_256_to_511byte_frames),
 762	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
 763	GBE_STATSA_INFO(tx_1024byte_frames),
 764	GBE_STATSA_INFO(net_bytes),
 765	GBE_STATSA_INFO(rx_sof_overruns),
 766	GBE_STATSA_INFO(rx_mof_overruns),
 767	GBE_STATSA_INFO(rx_dma_overruns),
 768	/* GBE module B */
 769	GBE_STATSB_INFO(rx_good_frames),
 770	GBE_STATSB_INFO(rx_broadcast_frames),
 771	GBE_STATSB_INFO(rx_multicast_frames),
 772	GBE_STATSB_INFO(rx_pause_frames),
 773	GBE_STATSB_INFO(rx_crc_errors),
 774	GBE_STATSB_INFO(rx_align_code_errors),
 775	GBE_STATSB_INFO(rx_oversized_frames),
 776	GBE_STATSB_INFO(rx_jabber_frames),
 777	GBE_STATSB_INFO(rx_undersized_frames),
 778	GBE_STATSB_INFO(rx_fragments),
 779	GBE_STATSB_INFO(rx_bytes),
 780	GBE_STATSB_INFO(tx_good_frames),
 781	GBE_STATSB_INFO(tx_broadcast_frames),
 782	GBE_STATSB_INFO(tx_multicast_frames),
 783	GBE_STATSB_INFO(tx_pause_frames),
 784	GBE_STATSB_INFO(tx_deferred_frames),
 785	GBE_STATSB_INFO(tx_collision_frames),
 786	GBE_STATSB_INFO(tx_single_coll_frames),
 787	GBE_STATSB_INFO(tx_mult_coll_frames),
 788	GBE_STATSB_INFO(tx_excessive_collisions),
 789	GBE_STATSB_INFO(tx_late_collisions),
 790	GBE_STATSB_INFO(tx_underrun),
 791	GBE_STATSB_INFO(tx_carrier_sense_errors),
 792	GBE_STATSB_INFO(tx_bytes),
 793	GBE_STATSB_INFO(tx_64byte_frames),
 794	GBE_STATSB_INFO(tx_65_to_127byte_frames),
 795	GBE_STATSB_INFO(tx_128_to_255byte_frames),
 796	GBE_STATSB_INFO(tx_256_to_511byte_frames),
 797	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
 798	GBE_STATSB_INFO(tx_1024byte_frames),
 799	GBE_STATSB_INFO(net_bytes),
 800	GBE_STATSB_INFO(rx_sof_overruns),
 801	GBE_STATSB_INFO(rx_mof_overruns),
 802	GBE_STATSB_INFO(rx_dma_overruns),
 803	/* GBE module C */
 804	GBE_STATSC_INFO(rx_good_frames),
 805	GBE_STATSC_INFO(rx_broadcast_frames),
 806	GBE_STATSC_INFO(rx_multicast_frames),
 807	GBE_STATSC_INFO(rx_pause_frames),
 808	GBE_STATSC_INFO(rx_crc_errors),
 809	GBE_STATSC_INFO(rx_align_code_errors),
 810	GBE_STATSC_INFO(rx_oversized_frames),
 811	GBE_STATSC_INFO(rx_jabber_frames),
 812	GBE_STATSC_INFO(rx_undersized_frames),
 813	GBE_STATSC_INFO(rx_fragments),
 814	GBE_STATSC_INFO(rx_bytes),
 815	GBE_STATSC_INFO(tx_good_frames),
 816	GBE_STATSC_INFO(tx_broadcast_frames),
 817	GBE_STATSC_INFO(tx_multicast_frames),
 818	GBE_STATSC_INFO(tx_pause_frames),
 819	GBE_STATSC_INFO(tx_deferred_frames),
 820	GBE_STATSC_INFO(tx_collision_frames),
 821	GBE_STATSC_INFO(tx_single_coll_frames),
 822	GBE_STATSC_INFO(tx_mult_coll_frames),
 823	GBE_STATSC_INFO(tx_excessive_collisions),
 824	GBE_STATSC_INFO(tx_late_collisions),
 825	GBE_STATSC_INFO(tx_underrun),
 826	GBE_STATSC_INFO(tx_carrier_sense_errors),
 827	GBE_STATSC_INFO(tx_bytes),
 828	GBE_STATSC_INFO(tx_64byte_frames),
 829	GBE_STATSC_INFO(tx_65_to_127byte_frames),
 830	GBE_STATSC_INFO(tx_128_to_255byte_frames),
 831	GBE_STATSC_INFO(tx_256_to_511byte_frames),
 832	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
 833	GBE_STATSC_INFO(tx_1024byte_frames),
 834	GBE_STATSC_INFO(net_bytes),
 835	GBE_STATSC_INFO(rx_sof_overruns),
 836	GBE_STATSC_INFO(rx_mof_overruns),
 837	GBE_STATSC_INFO(rx_dma_overruns),
 838	/* GBE module D */
 839	GBE_STATSD_INFO(rx_good_frames),
 840	GBE_STATSD_INFO(rx_broadcast_frames),
 841	GBE_STATSD_INFO(rx_multicast_frames),
 842	GBE_STATSD_INFO(rx_pause_frames),
 843	GBE_STATSD_INFO(rx_crc_errors),
 844	GBE_STATSD_INFO(rx_align_code_errors),
 845	GBE_STATSD_INFO(rx_oversized_frames),
 846	GBE_STATSD_INFO(rx_jabber_frames),
 847	GBE_STATSD_INFO(rx_undersized_frames),
 848	GBE_STATSD_INFO(rx_fragments),
 849	GBE_STATSD_INFO(rx_bytes),
 850	GBE_STATSD_INFO(tx_good_frames),
 851	GBE_STATSD_INFO(tx_broadcast_frames),
 852	GBE_STATSD_INFO(tx_multicast_frames),
 853	GBE_STATSD_INFO(tx_pause_frames),
 854	GBE_STATSD_INFO(tx_deferred_frames),
 855	GBE_STATSD_INFO(tx_collision_frames),
 856	GBE_STATSD_INFO(tx_single_coll_frames),
 857	GBE_STATSD_INFO(tx_mult_coll_frames),
 858	GBE_STATSD_INFO(tx_excessive_collisions),
 859	GBE_STATSD_INFO(tx_late_collisions),
 860	GBE_STATSD_INFO(tx_underrun),
 861	GBE_STATSD_INFO(tx_carrier_sense_errors),
 862	GBE_STATSD_INFO(tx_bytes),
 863	GBE_STATSD_INFO(tx_64byte_frames),
 864	GBE_STATSD_INFO(tx_65_to_127byte_frames),
 865	GBE_STATSD_INFO(tx_128_to_255byte_frames),
 866	GBE_STATSD_INFO(tx_256_to_511byte_frames),
 867	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
 868	GBE_STATSD_INFO(tx_1024byte_frames),
 869	GBE_STATSD_INFO(net_bytes),
 870	GBE_STATSD_INFO(rx_sof_overruns),
 871	GBE_STATSD_INFO(rx_mof_overruns),
 872	GBE_STATSD_INFO(rx_dma_overruns),
 873};
 874
 875/* This is the size of entries in GBENU_STATS_HOST */
 876#define GBENU_ET_STATS_HOST_SIZE	52
 877
 878#define GBENU_STATS_HOST(field)					\
 879{								\
 880	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
 881	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 882	offsetof(struct gbenu_hw_stats, field)			\
 883}
 884
 885/* This is the size of entries in GBENU_STATS_PORT */
 886#define GBENU_ET_STATS_PORT_SIZE	65
 887
 888#define GBENU_STATS_P1(field)					\
 889{								\
 890	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
 891	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 892	offsetof(struct gbenu_hw_stats, field)			\
 893}
 894
 895#define GBENU_STATS_P2(field)					\
 896{								\
 897	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
 898	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 899	offsetof(struct gbenu_hw_stats, field)			\
 900}
 901
 902#define GBENU_STATS_P3(field)					\
 903{								\
 904	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
 905	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 906	offsetof(struct gbenu_hw_stats, field)			\
 907}
 908
 909#define GBENU_STATS_P4(field)					\
 910{								\
 911	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
 912	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 913	offsetof(struct gbenu_hw_stats, field)			\
 914}
 915
 916#define GBENU_STATS_P5(field)					\
 917{								\
 918	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
 919	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 920	offsetof(struct gbenu_hw_stats, field)			\
 921}
 922
 923#define GBENU_STATS_P6(field)					\
 924{								\
 925	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
 926	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 927	offsetof(struct gbenu_hw_stats, field)			\
 928}
 929
 930#define GBENU_STATS_P7(field)					\
 931{								\
 932	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
 933	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 934	offsetof(struct gbenu_hw_stats, field)			\
 935}
 936
 937#define GBENU_STATS_P8(field)					\
 938{								\
 939	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
 940	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
 941	offsetof(struct gbenu_hw_stats, field)			\
 942}
 943
 944static const struct netcp_ethtool_stat gbenu_et_stats[] = {
 945	/* GBENU Host Module */
 946	GBENU_STATS_HOST(rx_good_frames),
 947	GBENU_STATS_HOST(rx_broadcast_frames),
 948	GBENU_STATS_HOST(rx_multicast_frames),
 949	GBENU_STATS_HOST(rx_crc_errors),
 950	GBENU_STATS_HOST(rx_oversized_frames),
 951	GBENU_STATS_HOST(rx_undersized_frames),
 952	GBENU_STATS_HOST(ale_drop),
 953	GBENU_STATS_HOST(ale_overrun_drop),
 954	GBENU_STATS_HOST(rx_bytes),
 955	GBENU_STATS_HOST(tx_good_frames),
 956	GBENU_STATS_HOST(tx_broadcast_frames),
 957	GBENU_STATS_HOST(tx_multicast_frames),
 958	GBENU_STATS_HOST(tx_bytes),
 959	GBENU_STATS_HOST(tx_64B_frames),
 960	GBENU_STATS_HOST(tx_65_to_127B_frames),
 961	GBENU_STATS_HOST(tx_128_to_255B_frames),
 962	GBENU_STATS_HOST(tx_256_to_511B_frames),
 963	GBENU_STATS_HOST(tx_512_to_1023B_frames),
 964	GBENU_STATS_HOST(tx_1024B_frames),
 965	GBENU_STATS_HOST(net_bytes),
 966	GBENU_STATS_HOST(rx_bottom_fifo_drop),
 967	GBENU_STATS_HOST(rx_port_mask_drop),
 968	GBENU_STATS_HOST(rx_top_fifo_drop),
 969	GBENU_STATS_HOST(ale_rate_limit_drop),
 970	GBENU_STATS_HOST(ale_vid_ingress_drop),
 971	GBENU_STATS_HOST(ale_da_eq_sa_drop),
 972	GBENU_STATS_HOST(ale_unknown_ucast),
 973	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
 974	GBENU_STATS_HOST(ale_unknown_mcast),
 975	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
 976	GBENU_STATS_HOST(ale_unknown_bcast),
 977	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
 978	GBENU_STATS_HOST(ale_pol_match),
 979	GBENU_STATS_HOST(ale_pol_match_red),
 980	GBENU_STATS_HOST(ale_pol_match_yellow),
 981	GBENU_STATS_HOST(tx_mem_protect_err),
 982	GBENU_STATS_HOST(tx_pri0_drop),
 983	GBENU_STATS_HOST(tx_pri1_drop),
 984	GBENU_STATS_HOST(tx_pri2_drop),
 985	GBENU_STATS_HOST(tx_pri3_drop),
 986	GBENU_STATS_HOST(tx_pri4_drop),
 987	GBENU_STATS_HOST(tx_pri5_drop),
 988	GBENU_STATS_HOST(tx_pri6_drop),
 989	GBENU_STATS_HOST(tx_pri7_drop),
 990	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
 991	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
 992	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
 993	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
 994	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
 995	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
 996	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
 997	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
 998	/* GBENU Module 1 */
 999	GBENU_STATS_P1(rx_good_frames),
1000	GBENU_STATS_P1(rx_broadcast_frames),
1001	GBENU_STATS_P1(rx_multicast_frames),
1002	GBENU_STATS_P1(rx_pause_frames),
1003	GBENU_STATS_P1(rx_crc_errors),
1004	GBENU_STATS_P1(rx_align_code_errors),
1005	GBENU_STATS_P1(rx_oversized_frames),
1006	GBENU_STATS_P1(rx_jabber_frames),
1007	GBENU_STATS_P1(rx_undersized_frames),
1008	GBENU_STATS_P1(rx_fragments),
1009	GBENU_STATS_P1(ale_drop),
1010	GBENU_STATS_P1(ale_overrun_drop),
1011	GBENU_STATS_P1(rx_bytes),
1012	GBENU_STATS_P1(tx_good_frames),
1013	GBENU_STATS_P1(tx_broadcast_frames),
1014	GBENU_STATS_P1(tx_multicast_frames),
1015	GBENU_STATS_P1(tx_pause_frames),
1016	GBENU_STATS_P1(tx_deferred_frames),
1017	GBENU_STATS_P1(tx_collision_frames),
1018	GBENU_STATS_P1(tx_single_coll_frames),
1019	GBENU_STATS_P1(tx_mult_coll_frames),
1020	GBENU_STATS_P1(tx_excessive_collisions),
1021	GBENU_STATS_P1(tx_late_collisions),
1022	GBENU_STATS_P1(rx_ipg_error),
1023	GBENU_STATS_P1(tx_carrier_sense_errors),
1024	GBENU_STATS_P1(tx_bytes),
1025	GBENU_STATS_P1(tx_64B_frames),
1026	GBENU_STATS_P1(tx_65_to_127B_frames),
1027	GBENU_STATS_P1(tx_128_to_255B_frames),
1028	GBENU_STATS_P1(tx_256_to_511B_frames),
1029	GBENU_STATS_P1(tx_512_to_1023B_frames),
1030	GBENU_STATS_P1(tx_1024B_frames),
1031	GBENU_STATS_P1(net_bytes),
1032	GBENU_STATS_P1(rx_bottom_fifo_drop),
1033	GBENU_STATS_P1(rx_port_mask_drop),
1034	GBENU_STATS_P1(rx_top_fifo_drop),
1035	GBENU_STATS_P1(ale_rate_limit_drop),
1036	GBENU_STATS_P1(ale_vid_ingress_drop),
1037	GBENU_STATS_P1(ale_da_eq_sa_drop),
1038	GBENU_STATS_P1(ale_unknown_ucast),
1039	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1040	GBENU_STATS_P1(ale_unknown_mcast),
1041	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1042	GBENU_STATS_P1(ale_unknown_bcast),
1043	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1044	GBENU_STATS_P1(ale_pol_match),
1045	GBENU_STATS_P1(ale_pol_match_red),
1046	GBENU_STATS_P1(ale_pol_match_yellow),
1047	GBENU_STATS_P1(tx_mem_protect_err),
1048	GBENU_STATS_P1(tx_pri0_drop),
1049	GBENU_STATS_P1(tx_pri1_drop),
1050	GBENU_STATS_P1(tx_pri2_drop),
1051	GBENU_STATS_P1(tx_pri3_drop),
1052	GBENU_STATS_P1(tx_pri4_drop),
1053	GBENU_STATS_P1(tx_pri5_drop),
1054	GBENU_STATS_P1(tx_pri6_drop),
1055	GBENU_STATS_P1(tx_pri7_drop),
1056	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1057	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1058	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1059	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1060	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1061	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1062	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1063	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1064	/* GBENU Module 2 */
1065	GBENU_STATS_P2(rx_good_frames),
1066	GBENU_STATS_P2(rx_broadcast_frames),
1067	GBENU_STATS_P2(rx_multicast_frames),
1068	GBENU_STATS_P2(rx_pause_frames),
1069	GBENU_STATS_P2(rx_crc_errors),
1070	GBENU_STATS_P2(rx_align_code_errors),
1071	GBENU_STATS_P2(rx_oversized_frames),
1072	GBENU_STATS_P2(rx_jabber_frames),
1073	GBENU_STATS_P2(rx_undersized_frames),
1074	GBENU_STATS_P2(rx_fragments),
1075	GBENU_STATS_P2(ale_drop),
1076	GBENU_STATS_P2(ale_overrun_drop),
1077	GBENU_STATS_P2(rx_bytes),
1078	GBENU_STATS_P2(tx_good_frames),
1079	GBENU_STATS_P2(tx_broadcast_frames),
1080	GBENU_STATS_P2(tx_multicast_frames),
1081	GBENU_STATS_P2(tx_pause_frames),
1082	GBENU_STATS_P2(tx_deferred_frames),
1083	GBENU_STATS_P2(tx_collision_frames),
1084	GBENU_STATS_P2(tx_single_coll_frames),
1085	GBENU_STATS_P2(tx_mult_coll_frames),
1086	GBENU_STATS_P2(tx_excessive_collisions),
1087	GBENU_STATS_P2(tx_late_collisions),
1088	GBENU_STATS_P2(rx_ipg_error),
1089	GBENU_STATS_P2(tx_carrier_sense_errors),
1090	GBENU_STATS_P2(tx_bytes),
1091	GBENU_STATS_P2(tx_64B_frames),
1092	GBENU_STATS_P2(tx_65_to_127B_frames),
1093	GBENU_STATS_P2(tx_128_to_255B_frames),
1094	GBENU_STATS_P2(tx_256_to_511B_frames),
1095	GBENU_STATS_P2(tx_512_to_1023B_frames),
1096	GBENU_STATS_P2(tx_1024B_frames),
1097	GBENU_STATS_P2(net_bytes),
1098	GBENU_STATS_P2(rx_bottom_fifo_drop),
1099	GBENU_STATS_P2(rx_port_mask_drop),
1100	GBENU_STATS_P2(rx_top_fifo_drop),
1101	GBENU_STATS_P2(ale_rate_limit_drop),
1102	GBENU_STATS_P2(ale_vid_ingress_drop),
1103	GBENU_STATS_P2(ale_da_eq_sa_drop),
1104	GBENU_STATS_P2(ale_unknown_ucast),
1105	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1106	GBENU_STATS_P2(ale_unknown_mcast),
1107	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1108	GBENU_STATS_P2(ale_unknown_bcast),
1109	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1110	GBENU_STATS_P2(ale_pol_match),
1111	GBENU_STATS_P2(ale_pol_match_red),
1112	GBENU_STATS_P2(ale_pol_match_yellow),
1113	GBENU_STATS_P2(tx_mem_protect_err),
1114	GBENU_STATS_P2(tx_pri0_drop),
1115	GBENU_STATS_P2(tx_pri1_drop),
1116	GBENU_STATS_P2(tx_pri2_drop),
1117	GBENU_STATS_P2(tx_pri3_drop),
1118	GBENU_STATS_P2(tx_pri4_drop),
1119	GBENU_STATS_P2(tx_pri5_drop),
1120	GBENU_STATS_P2(tx_pri6_drop),
1121	GBENU_STATS_P2(tx_pri7_drop),
1122	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1123	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1124	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1125	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1126	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1127	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1128	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1129	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1130	/* GBENU Module 3 */
1131	GBENU_STATS_P3(rx_good_frames),
1132	GBENU_STATS_P3(rx_broadcast_frames),
1133	GBENU_STATS_P3(rx_multicast_frames),
1134	GBENU_STATS_P3(rx_pause_frames),
1135	GBENU_STATS_P3(rx_crc_errors),
1136	GBENU_STATS_P3(rx_align_code_errors),
1137	GBENU_STATS_P3(rx_oversized_frames),
1138	GBENU_STATS_P3(rx_jabber_frames),
1139	GBENU_STATS_P3(rx_undersized_frames),
1140	GBENU_STATS_P3(rx_fragments),
1141	GBENU_STATS_P3(ale_drop),
1142	GBENU_STATS_P3(ale_overrun_drop),
1143	GBENU_STATS_P3(rx_bytes),
1144	GBENU_STATS_P3(tx_good_frames),
1145	GBENU_STATS_P3(tx_broadcast_frames),
1146	GBENU_STATS_P3(tx_multicast_frames),
1147	GBENU_STATS_P3(tx_pause_frames),
1148	GBENU_STATS_P3(tx_deferred_frames),
1149	GBENU_STATS_P3(tx_collision_frames),
1150	GBENU_STATS_P3(tx_single_coll_frames),
1151	GBENU_STATS_P3(tx_mult_coll_frames),
1152	GBENU_STATS_P3(tx_excessive_collisions),
1153	GBENU_STATS_P3(tx_late_collisions),
1154	GBENU_STATS_P3(rx_ipg_error),
1155	GBENU_STATS_P3(tx_carrier_sense_errors),
1156	GBENU_STATS_P3(tx_bytes),
1157	GBENU_STATS_P3(tx_64B_frames),
1158	GBENU_STATS_P3(tx_65_to_127B_frames),
1159	GBENU_STATS_P3(tx_128_to_255B_frames),
1160	GBENU_STATS_P3(tx_256_to_511B_frames),
1161	GBENU_STATS_P3(tx_512_to_1023B_frames),
1162	GBENU_STATS_P3(tx_1024B_frames),
1163	GBENU_STATS_P3(net_bytes),
1164	GBENU_STATS_P3(rx_bottom_fifo_drop),
1165	GBENU_STATS_P3(rx_port_mask_drop),
1166	GBENU_STATS_P3(rx_top_fifo_drop),
1167	GBENU_STATS_P3(ale_rate_limit_drop),
1168	GBENU_STATS_P3(ale_vid_ingress_drop),
1169	GBENU_STATS_P3(ale_da_eq_sa_drop),
1170	GBENU_STATS_P3(ale_unknown_ucast),
1171	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1172	GBENU_STATS_P3(ale_unknown_mcast),
1173	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1174	GBENU_STATS_P3(ale_unknown_bcast),
1175	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1176	GBENU_STATS_P3(ale_pol_match),
1177	GBENU_STATS_P3(ale_pol_match_red),
1178	GBENU_STATS_P3(ale_pol_match_yellow),
1179	GBENU_STATS_P3(tx_mem_protect_err),
1180	GBENU_STATS_P3(tx_pri0_drop),
1181	GBENU_STATS_P3(tx_pri1_drop),
1182	GBENU_STATS_P3(tx_pri2_drop),
1183	GBENU_STATS_P3(tx_pri3_drop),
1184	GBENU_STATS_P3(tx_pri4_drop),
1185	GBENU_STATS_P3(tx_pri5_drop),
1186	GBENU_STATS_P3(tx_pri6_drop),
1187	GBENU_STATS_P3(tx_pri7_drop),
1188	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1189	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1190	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1191	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1192	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1193	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1194	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1195	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1196	/* GBENU Module 4 */
1197	GBENU_STATS_P4(rx_good_frames),
1198	GBENU_STATS_P4(rx_broadcast_frames),
1199	GBENU_STATS_P4(rx_multicast_frames),
1200	GBENU_STATS_P4(rx_pause_frames),
1201	GBENU_STATS_P4(rx_crc_errors),
1202	GBENU_STATS_P4(rx_align_code_errors),
1203	GBENU_STATS_P4(rx_oversized_frames),
1204	GBENU_STATS_P4(rx_jabber_frames),
1205	GBENU_STATS_P4(rx_undersized_frames),
1206	GBENU_STATS_P4(rx_fragments),
1207	GBENU_STATS_P4(ale_drop),
1208	GBENU_STATS_P4(ale_overrun_drop),
1209	GBENU_STATS_P4(rx_bytes),
1210	GBENU_STATS_P4(tx_good_frames),
1211	GBENU_STATS_P4(tx_broadcast_frames),
1212	GBENU_STATS_P4(tx_multicast_frames),
1213	GBENU_STATS_P4(tx_pause_frames),
1214	GBENU_STATS_P4(tx_deferred_frames),
1215	GBENU_STATS_P4(tx_collision_frames),
1216	GBENU_STATS_P4(tx_single_coll_frames),
1217	GBENU_STATS_P4(tx_mult_coll_frames),
1218	GBENU_STATS_P4(tx_excessive_collisions),
1219	GBENU_STATS_P4(tx_late_collisions),
1220	GBENU_STATS_P4(rx_ipg_error),
1221	GBENU_STATS_P4(tx_carrier_sense_errors),
1222	GBENU_STATS_P4(tx_bytes),
1223	GBENU_STATS_P4(tx_64B_frames),
1224	GBENU_STATS_P4(tx_65_to_127B_frames),
1225	GBENU_STATS_P4(tx_128_to_255B_frames),
1226	GBENU_STATS_P4(tx_256_to_511B_frames),
1227	GBENU_STATS_P4(tx_512_to_1023B_frames),
1228	GBENU_STATS_P4(tx_1024B_frames),
1229	GBENU_STATS_P4(net_bytes),
1230	GBENU_STATS_P4(rx_bottom_fifo_drop),
1231	GBENU_STATS_P4(rx_port_mask_drop),
1232	GBENU_STATS_P4(rx_top_fifo_drop),
1233	GBENU_STATS_P4(ale_rate_limit_drop),
1234	GBENU_STATS_P4(ale_vid_ingress_drop),
1235	GBENU_STATS_P4(ale_da_eq_sa_drop),
1236	GBENU_STATS_P4(ale_unknown_ucast),
1237	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1238	GBENU_STATS_P4(ale_unknown_mcast),
1239	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1240	GBENU_STATS_P4(ale_unknown_bcast),
1241	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1242	GBENU_STATS_P4(ale_pol_match),
1243	GBENU_STATS_P4(ale_pol_match_red),
1244	GBENU_STATS_P4(ale_pol_match_yellow),
1245	GBENU_STATS_P4(tx_mem_protect_err),
1246	GBENU_STATS_P4(tx_pri0_drop),
1247	GBENU_STATS_P4(tx_pri1_drop),
1248	GBENU_STATS_P4(tx_pri2_drop),
1249	GBENU_STATS_P4(tx_pri3_drop),
1250	GBENU_STATS_P4(tx_pri4_drop),
1251	GBENU_STATS_P4(tx_pri5_drop),
1252	GBENU_STATS_P4(tx_pri6_drop),
1253	GBENU_STATS_P4(tx_pri7_drop),
1254	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1255	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1256	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1257	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1258	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1259	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1260	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1261	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1262	/* GBENU Module 5 */
1263	GBENU_STATS_P5(rx_good_frames),
1264	GBENU_STATS_P5(rx_broadcast_frames),
1265	GBENU_STATS_P5(rx_multicast_frames),
1266	GBENU_STATS_P5(rx_pause_frames),
1267	GBENU_STATS_P5(rx_crc_errors),
1268	GBENU_STATS_P5(rx_align_code_errors),
1269	GBENU_STATS_P5(rx_oversized_frames),
1270	GBENU_STATS_P5(rx_jabber_frames),
1271	GBENU_STATS_P5(rx_undersized_frames),
1272	GBENU_STATS_P5(rx_fragments),
1273	GBENU_STATS_P5(ale_drop),
1274	GBENU_STATS_P5(ale_overrun_drop),
1275	GBENU_STATS_P5(rx_bytes),
1276	GBENU_STATS_P5(tx_good_frames),
1277	GBENU_STATS_P5(tx_broadcast_frames),
1278	GBENU_STATS_P5(tx_multicast_frames),
1279	GBENU_STATS_P5(tx_pause_frames),
1280	GBENU_STATS_P5(tx_deferred_frames),
1281	GBENU_STATS_P5(tx_collision_frames),
1282	GBENU_STATS_P5(tx_single_coll_frames),
1283	GBENU_STATS_P5(tx_mult_coll_frames),
1284	GBENU_STATS_P5(tx_excessive_collisions),
1285	GBENU_STATS_P5(tx_late_collisions),
1286	GBENU_STATS_P5(rx_ipg_error),
1287	GBENU_STATS_P5(tx_carrier_sense_errors),
1288	GBENU_STATS_P5(tx_bytes),
1289	GBENU_STATS_P5(tx_64B_frames),
1290	GBENU_STATS_P5(tx_65_to_127B_frames),
1291	GBENU_STATS_P5(tx_128_to_255B_frames),
1292	GBENU_STATS_P5(tx_256_to_511B_frames),
1293	GBENU_STATS_P5(tx_512_to_1023B_frames),
1294	GBENU_STATS_P5(tx_1024B_frames),
1295	GBENU_STATS_P5(net_bytes),
1296	GBENU_STATS_P5(rx_bottom_fifo_drop),
1297	GBENU_STATS_P5(rx_port_mask_drop),
1298	GBENU_STATS_P5(rx_top_fifo_drop),
1299	GBENU_STATS_P5(ale_rate_limit_drop),
1300	GBENU_STATS_P5(ale_vid_ingress_drop),
1301	GBENU_STATS_P5(ale_da_eq_sa_drop),
1302	GBENU_STATS_P5(ale_unknown_ucast),
1303	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1304	GBENU_STATS_P5(ale_unknown_mcast),
1305	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1306	GBENU_STATS_P5(ale_unknown_bcast),
1307	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1308	GBENU_STATS_P5(ale_pol_match),
1309	GBENU_STATS_P5(ale_pol_match_red),
1310	GBENU_STATS_P5(ale_pol_match_yellow),
1311	GBENU_STATS_P5(tx_mem_protect_err),
1312	GBENU_STATS_P5(tx_pri0_drop),
1313	GBENU_STATS_P5(tx_pri1_drop),
1314	GBENU_STATS_P5(tx_pri2_drop),
1315	GBENU_STATS_P5(tx_pri3_drop),
1316	GBENU_STATS_P5(tx_pri4_drop),
1317	GBENU_STATS_P5(tx_pri5_drop),
1318	GBENU_STATS_P5(tx_pri6_drop),
1319	GBENU_STATS_P5(tx_pri7_drop),
1320	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1321	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1322	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1323	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1324	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1325	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1326	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1327	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1328	/* GBENU Module 6 */
1329	GBENU_STATS_P6(rx_good_frames),
1330	GBENU_STATS_P6(rx_broadcast_frames),
1331	GBENU_STATS_P6(rx_multicast_frames),
1332	GBENU_STATS_P6(rx_pause_frames),
1333	GBENU_STATS_P6(rx_crc_errors),
1334	GBENU_STATS_P6(rx_align_code_errors),
1335	GBENU_STATS_P6(rx_oversized_frames),
1336	GBENU_STATS_P6(rx_jabber_frames),
1337	GBENU_STATS_P6(rx_undersized_frames),
1338	GBENU_STATS_P6(rx_fragments),
1339	GBENU_STATS_P6(ale_drop),
1340	GBENU_STATS_P6(ale_overrun_drop),
1341	GBENU_STATS_P6(rx_bytes),
1342	GBENU_STATS_P6(tx_good_frames),
1343	GBENU_STATS_P6(tx_broadcast_frames),
1344	GBENU_STATS_P6(tx_multicast_frames),
1345	GBENU_STATS_P6(tx_pause_frames),
1346	GBENU_STATS_P6(tx_deferred_frames),
1347	GBENU_STATS_P6(tx_collision_frames),
1348	GBENU_STATS_P6(tx_single_coll_frames),
1349	GBENU_STATS_P6(tx_mult_coll_frames),
1350	GBENU_STATS_P6(tx_excessive_collisions),
1351	GBENU_STATS_P6(tx_late_collisions),
1352	GBENU_STATS_P6(rx_ipg_error),
1353	GBENU_STATS_P6(tx_carrier_sense_errors),
1354	GBENU_STATS_P6(tx_bytes),
1355	GBENU_STATS_P6(tx_64B_frames),
1356	GBENU_STATS_P6(tx_65_to_127B_frames),
1357	GBENU_STATS_P6(tx_128_to_255B_frames),
1358	GBENU_STATS_P6(tx_256_to_511B_frames),
1359	GBENU_STATS_P6(tx_512_to_1023B_frames),
1360	GBENU_STATS_P6(tx_1024B_frames),
1361	GBENU_STATS_P6(net_bytes),
1362	GBENU_STATS_P6(rx_bottom_fifo_drop),
1363	GBENU_STATS_P6(rx_port_mask_drop),
1364	GBENU_STATS_P6(rx_top_fifo_drop),
1365	GBENU_STATS_P6(ale_rate_limit_drop),
1366	GBENU_STATS_P6(ale_vid_ingress_drop),
1367	GBENU_STATS_P6(ale_da_eq_sa_drop),
1368	GBENU_STATS_P6(ale_unknown_ucast),
1369	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1370	GBENU_STATS_P6(ale_unknown_mcast),
1371	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1372	GBENU_STATS_P6(ale_unknown_bcast),
1373	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1374	GBENU_STATS_P6(ale_pol_match),
1375	GBENU_STATS_P6(ale_pol_match_red),
1376	GBENU_STATS_P6(ale_pol_match_yellow),
1377	GBENU_STATS_P6(tx_mem_protect_err),
1378	GBENU_STATS_P6(tx_pri0_drop),
1379	GBENU_STATS_P6(tx_pri1_drop),
1380	GBENU_STATS_P6(tx_pri2_drop),
1381	GBENU_STATS_P6(tx_pri3_drop),
1382	GBENU_STATS_P6(tx_pri4_drop),
1383	GBENU_STATS_P6(tx_pri5_drop),
1384	GBENU_STATS_P6(tx_pri6_drop),
1385	GBENU_STATS_P6(tx_pri7_drop),
1386	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1387	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1388	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1389	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1390	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1391	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1392	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1393	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1394	/* GBENU Module 7 */
1395	GBENU_STATS_P7(rx_good_frames),
1396	GBENU_STATS_P7(rx_broadcast_frames),
1397	GBENU_STATS_P7(rx_multicast_frames),
1398	GBENU_STATS_P7(rx_pause_frames),
1399	GBENU_STATS_P7(rx_crc_errors),
1400	GBENU_STATS_P7(rx_align_code_errors),
1401	GBENU_STATS_P7(rx_oversized_frames),
1402	GBENU_STATS_P7(rx_jabber_frames),
1403	GBENU_STATS_P7(rx_undersized_frames),
1404	GBENU_STATS_P7(rx_fragments),
1405	GBENU_STATS_P7(ale_drop),
1406	GBENU_STATS_P7(ale_overrun_drop),
1407	GBENU_STATS_P7(rx_bytes),
1408	GBENU_STATS_P7(tx_good_frames),
1409	GBENU_STATS_P7(tx_broadcast_frames),
1410	GBENU_STATS_P7(tx_multicast_frames),
1411	GBENU_STATS_P7(tx_pause_frames),
1412	GBENU_STATS_P7(tx_deferred_frames),
1413	GBENU_STATS_P7(tx_collision_frames),
1414	GBENU_STATS_P7(tx_single_coll_frames),
1415	GBENU_STATS_P7(tx_mult_coll_frames),
1416	GBENU_STATS_P7(tx_excessive_collisions),
1417	GBENU_STATS_P7(tx_late_collisions),
1418	GBENU_STATS_P7(rx_ipg_error),
1419	GBENU_STATS_P7(tx_carrier_sense_errors),
1420	GBENU_STATS_P7(tx_bytes),
1421	GBENU_STATS_P7(tx_64B_frames),
1422	GBENU_STATS_P7(tx_65_to_127B_frames),
1423	GBENU_STATS_P7(tx_128_to_255B_frames),
1424	GBENU_STATS_P7(tx_256_to_511B_frames),
1425	GBENU_STATS_P7(tx_512_to_1023B_frames),
1426	GBENU_STATS_P7(tx_1024B_frames),
1427	GBENU_STATS_P7(net_bytes),
1428	GBENU_STATS_P7(rx_bottom_fifo_drop),
1429	GBENU_STATS_P7(rx_port_mask_drop),
1430	GBENU_STATS_P7(rx_top_fifo_drop),
1431	GBENU_STATS_P7(ale_rate_limit_drop),
1432	GBENU_STATS_P7(ale_vid_ingress_drop),
1433	GBENU_STATS_P7(ale_da_eq_sa_drop),
1434	GBENU_STATS_P7(ale_unknown_ucast),
1435	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1436	GBENU_STATS_P7(ale_unknown_mcast),
1437	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1438	GBENU_STATS_P7(ale_unknown_bcast),
1439	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1440	GBENU_STATS_P7(ale_pol_match),
1441	GBENU_STATS_P7(ale_pol_match_red),
1442	GBENU_STATS_P7(ale_pol_match_yellow),
1443	GBENU_STATS_P7(tx_mem_protect_err),
1444	GBENU_STATS_P7(tx_pri0_drop),
1445	GBENU_STATS_P7(tx_pri1_drop),
1446	GBENU_STATS_P7(tx_pri2_drop),
1447	GBENU_STATS_P7(tx_pri3_drop),
1448	GBENU_STATS_P7(tx_pri4_drop),
1449	GBENU_STATS_P7(tx_pri5_drop),
1450	GBENU_STATS_P7(tx_pri6_drop),
1451	GBENU_STATS_P7(tx_pri7_drop),
1452	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1453	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1454	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1455	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1456	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1457	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1458	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1459	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1460	/* GBENU Module 8 */
1461	GBENU_STATS_P8(rx_good_frames),
1462	GBENU_STATS_P8(rx_broadcast_frames),
1463	GBENU_STATS_P8(rx_multicast_frames),
1464	GBENU_STATS_P8(rx_pause_frames),
1465	GBENU_STATS_P8(rx_crc_errors),
1466	GBENU_STATS_P8(rx_align_code_errors),
1467	GBENU_STATS_P8(rx_oversized_frames),
1468	GBENU_STATS_P8(rx_jabber_frames),
1469	GBENU_STATS_P8(rx_undersized_frames),
1470	GBENU_STATS_P8(rx_fragments),
1471	GBENU_STATS_P8(ale_drop),
1472	GBENU_STATS_P8(ale_overrun_drop),
1473	GBENU_STATS_P8(rx_bytes),
1474	GBENU_STATS_P8(tx_good_frames),
1475	GBENU_STATS_P8(tx_broadcast_frames),
1476	GBENU_STATS_P8(tx_multicast_frames),
1477	GBENU_STATS_P8(tx_pause_frames),
1478	GBENU_STATS_P8(tx_deferred_frames),
1479	GBENU_STATS_P8(tx_collision_frames),
1480	GBENU_STATS_P8(tx_single_coll_frames),
1481	GBENU_STATS_P8(tx_mult_coll_frames),
1482	GBENU_STATS_P8(tx_excessive_collisions),
1483	GBENU_STATS_P8(tx_late_collisions),
1484	GBENU_STATS_P8(rx_ipg_error),
1485	GBENU_STATS_P8(tx_carrier_sense_errors),
1486	GBENU_STATS_P8(tx_bytes),
1487	GBENU_STATS_P8(tx_64B_frames),
1488	GBENU_STATS_P8(tx_65_to_127B_frames),
1489	GBENU_STATS_P8(tx_128_to_255B_frames),
1490	GBENU_STATS_P8(tx_256_to_511B_frames),
1491	GBENU_STATS_P8(tx_512_to_1023B_frames),
1492	GBENU_STATS_P8(tx_1024B_frames),
1493	GBENU_STATS_P8(net_bytes),
1494	GBENU_STATS_P8(rx_bottom_fifo_drop),
1495	GBENU_STATS_P8(rx_port_mask_drop),
1496	GBENU_STATS_P8(rx_top_fifo_drop),
1497	GBENU_STATS_P8(ale_rate_limit_drop),
1498	GBENU_STATS_P8(ale_vid_ingress_drop),
1499	GBENU_STATS_P8(ale_da_eq_sa_drop),
1500	GBENU_STATS_P8(ale_unknown_ucast),
1501	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1502	GBENU_STATS_P8(ale_unknown_mcast),
1503	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1504	GBENU_STATS_P8(ale_unknown_bcast),
1505	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1506	GBENU_STATS_P8(ale_pol_match),
1507	GBENU_STATS_P8(ale_pol_match_red),
1508	GBENU_STATS_P8(ale_pol_match_yellow),
1509	GBENU_STATS_P8(tx_mem_protect_err),
1510	GBENU_STATS_P8(tx_pri0_drop),
1511	GBENU_STATS_P8(tx_pri1_drop),
1512	GBENU_STATS_P8(tx_pri2_drop),
1513	GBENU_STATS_P8(tx_pri3_drop),
1514	GBENU_STATS_P8(tx_pri4_drop),
1515	GBENU_STATS_P8(tx_pri5_drop),
1516	GBENU_STATS_P8(tx_pri6_drop),
1517	GBENU_STATS_P8(tx_pri7_drop),
1518	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1519	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1520	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1521	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1522	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1523	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1524	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1525	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1526};
1527
1528#define XGBE_STATS0_INFO(field)				\
1529{							\
1530	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1531	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1532	offsetof(struct xgbe_hw_stats, field)		\
1533}
1534
1535#define XGBE_STATS1_INFO(field)				\
1536{							\
1537	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1538	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1539	offsetof(struct xgbe_hw_stats, field)		\
1540}
1541
1542#define XGBE_STATS2_INFO(field)				\
1543{							\
1544	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1545	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1546	offsetof(struct xgbe_hw_stats, field)		\
1547}
1548
1549static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1550	/* GBE module 0 */
1551	XGBE_STATS0_INFO(rx_good_frames),
1552	XGBE_STATS0_INFO(rx_broadcast_frames),
1553	XGBE_STATS0_INFO(rx_multicast_frames),
1554	XGBE_STATS0_INFO(rx_oversized_frames),
1555	XGBE_STATS0_INFO(rx_undersized_frames),
1556	XGBE_STATS0_INFO(overrun_type4),
1557	XGBE_STATS0_INFO(overrun_type5),
1558	XGBE_STATS0_INFO(rx_bytes),
1559	XGBE_STATS0_INFO(tx_good_frames),
1560	XGBE_STATS0_INFO(tx_broadcast_frames),
1561	XGBE_STATS0_INFO(tx_multicast_frames),
1562	XGBE_STATS0_INFO(tx_bytes),
1563	XGBE_STATS0_INFO(tx_64byte_frames),
1564	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1565	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1566	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1567	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1568	XGBE_STATS0_INFO(tx_1024byte_frames),
1569	XGBE_STATS0_INFO(net_bytes),
1570	XGBE_STATS0_INFO(rx_sof_overruns),
1571	XGBE_STATS0_INFO(rx_mof_overruns),
1572	XGBE_STATS0_INFO(rx_dma_overruns),
1573	/* XGBE module 1 */
1574	XGBE_STATS1_INFO(rx_good_frames),
1575	XGBE_STATS1_INFO(rx_broadcast_frames),
1576	XGBE_STATS1_INFO(rx_multicast_frames),
1577	XGBE_STATS1_INFO(rx_pause_frames),
1578	XGBE_STATS1_INFO(rx_crc_errors),
1579	XGBE_STATS1_INFO(rx_align_code_errors),
1580	XGBE_STATS1_INFO(rx_oversized_frames),
1581	XGBE_STATS1_INFO(rx_jabber_frames),
1582	XGBE_STATS1_INFO(rx_undersized_frames),
1583	XGBE_STATS1_INFO(rx_fragments),
1584	XGBE_STATS1_INFO(overrun_type4),
1585	XGBE_STATS1_INFO(overrun_type5),
1586	XGBE_STATS1_INFO(rx_bytes),
1587	XGBE_STATS1_INFO(tx_good_frames),
1588	XGBE_STATS1_INFO(tx_broadcast_frames),
1589	XGBE_STATS1_INFO(tx_multicast_frames),
1590	XGBE_STATS1_INFO(tx_pause_frames),
1591	XGBE_STATS1_INFO(tx_deferred_frames),
1592	XGBE_STATS1_INFO(tx_collision_frames),
1593	XGBE_STATS1_INFO(tx_single_coll_frames),
1594	XGBE_STATS1_INFO(tx_mult_coll_frames),
1595	XGBE_STATS1_INFO(tx_excessive_collisions),
1596	XGBE_STATS1_INFO(tx_late_collisions),
1597	XGBE_STATS1_INFO(tx_underrun),
1598	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1599	XGBE_STATS1_INFO(tx_bytes),
1600	XGBE_STATS1_INFO(tx_64byte_frames),
1601	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1602	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1603	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1604	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1605	XGBE_STATS1_INFO(tx_1024byte_frames),
1606	XGBE_STATS1_INFO(net_bytes),
1607	XGBE_STATS1_INFO(rx_sof_overruns),
1608	XGBE_STATS1_INFO(rx_mof_overruns),
1609	XGBE_STATS1_INFO(rx_dma_overruns),
1610	/* XGBE module 2 */
1611	XGBE_STATS2_INFO(rx_good_frames),
1612	XGBE_STATS2_INFO(rx_broadcast_frames),
1613	XGBE_STATS2_INFO(rx_multicast_frames),
1614	XGBE_STATS2_INFO(rx_pause_frames),
1615	XGBE_STATS2_INFO(rx_crc_errors),
1616	XGBE_STATS2_INFO(rx_align_code_errors),
1617	XGBE_STATS2_INFO(rx_oversized_frames),
1618	XGBE_STATS2_INFO(rx_jabber_frames),
1619	XGBE_STATS2_INFO(rx_undersized_frames),
1620	XGBE_STATS2_INFO(rx_fragments),
1621	XGBE_STATS2_INFO(overrun_type4),
1622	XGBE_STATS2_INFO(overrun_type5),
1623	XGBE_STATS2_INFO(rx_bytes),
1624	XGBE_STATS2_INFO(tx_good_frames),
1625	XGBE_STATS2_INFO(tx_broadcast_frames),
1626	XGBE_STATS2_INFO(tx_multicast_frames),
1627	XGBE_STATS2_INFO(tx_pause_frames),
1628	XGBE_STATS2_INFO(tx_deferred_frames),
1629	XGBE_STATS2_INFO(tx_collision_frames),
1630	XGBE_STATS2_INFO(tx_single_coll_frames),
1631	XGBE_STATS2_INFO(tx_mult_coll_frames),
1632	XGBE_STATS2_INFO(tx_excessive_collisions),
1633	XGBE_STATS2_INFO(tx_late_collisions),
1634	XGBE_STATS2_INFO(tx_underrun),
1635	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1636	XGBE_STATS2_INFO(tx_bytes),
1637	XGBE_STATS2_INFO(tx_64byte_frames),
1638	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1639	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1640	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1641	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1642	XGBE_STATS2_INFO(tx_1024byte_frames),
1643	XGBE_STATS2_INFO(net_bytes),
1644	XGBE_STATS2_INFO(rx_sof_overruns),
1645	XGBE_STATS2_INFO(rx_mof_overruns),
1646	XGBE_STATS2_INFO(rx_dma_overruns),
1647};
1648
1649#define for_each_intf(i, priv) \
1650	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1651
1652#define for_each_sec_slave(slave, priv) \
1653	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1654
1655#define first_sec_slave(priv)					\
1656	list_first_entry(&priv->secondary_slaves, \
1657			struct gbe_slave, slave_list)
1658
1659static void keystone_get_drvinfo(struct net_device *ndev,
1660				 struct ethtool_drvinfo *info)
1661{
1662	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1663	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1664}
1665
1666static u32 keystone_get_msglevel(struct net_device *ndev)
1667{
1668	struct netcp_intf *netcp = netdev_priv(ndev);
1669
1670	return netcp->msg_enable;
1671}
1672
1673static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1674{
1675	struct netcp_intf *netcp = netdev_priv(ndev);
1676
1677	netcp->msg_enable = value;
1678}
1679
 
 
 
 
 
 
 
 
 
 
 
1680static void keystone_get_stat_strings(struct net_device *ndev,
1681				      uint32_t stringset, uint8_t *data)
1682{
1683	struct netcp_intf *netcp = netdev_priv(ndev);
1684	struct gbe_intf *gbe_intf;
1685	struct gbe_priv *gbe_dev;
1686	int i;
1687
1688	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1689	if (!gbe_intf)
1690		return;
1691	gbe_dev = gbe_intf->gbe_dev;
1692
1693	switch (stringset) {
1694	case ETH_SS_STATS:
1695		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1696			memcpy(data, gbe_dev->et_stats[i].desc,
1697			       ETH_GSTRING_LEN);
1698			data += ETH_GSTRING_LEN;
1699		}
1700		break;
1701	case ETH_SS_TEST:
1702		break;
1703	}
1704}
1705
1706static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1707{
1708	struct netcp_intf *netcp = netdev_priv(ndev);
1709	struct gbe_intf *gbe_intf;
1710	struct gbe_priv *gbe_dev;
1711
1712	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1713	if (!gbe_intf)
1714		return -EINVAL;
1715	gbe_dev = gbe_intf->gbe_dev;
1716
1717	switch (stringset) {
1718	case ETH_SS_TEST:
1719		return 0;
1720	case ETH_SS_STATS:
1721		return gbe_dev->num_et_stats;
1722	default:
1723		return -EINVAL;
1724	}
1725}
1726
1727static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1728{
1729	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1730	u32  __iomem *p_stats_entry;
1731	int i;
1732
1733	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1734		if (gbe_dev->et_stats[i].type == stats_mod) {
1735			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1736			gbe_dev->hw_stats[i] = 0;
1737			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1738		}
1739	}
1740}
1741
1742static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1743					     int et_stats_entry)
1744{
1745	void __iomem *base = NULL;
1746	u32  __iomem *p_stats_entry;
1747	u32 curr, delta;
1748
1749	/* The hw_stats_regs pointers are already
1750	 * properly set to point to the right base:
1751	 */
1752	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1753	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1754	curr = readl(p_stats_entry);
1755	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1756	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1757	gbe_dev->hw_stats[et_stats_entry] += delta;
1758}
1759
1760static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1761{
1762	int i;
1763
1764	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1765		gbe_update_hw_stats_entry(gbe_dev, i);
1766
1767		if (data)
1768			data[i] = gbe_dev->hw_stats[i];
1769	}
1770}
1771
1772static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1773					       int stats_mod)
1774{
1775	u32 val;
1776
1777	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1778
1779	switch (stats_mod) {
1780	case GBE_STATSA_MODULE:
1781	case GBE_STATSB_MODULE:
1782		val &= ~GBE_STATS_CD_SEL;
1783		break;
1784	case GBE_STATSC_MODULE:
1785	case GBE_STATSD_MODULE:
1786		val |= GBE_STATS_CD_SEL;
1787		break;
1788	default:
1789		return;
1790	}
1791
1792	/* make the stat module visible */
1793	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1794}
1795
1796static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1797{
1798	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1799	gbe_reset_mod_stats(gbe_dev, stats_mod);
1800}
1801
1802static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1803{
1804	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1805	int et_entry, j, pair;
1806
1807	for (pair = 0; pair < 2; pair++) {
1808		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1809						      GBE_STATSC_MODULE :
1810						      GBE_STATSA_MODULE));
1811
1812		for (j = 0; j < half_num_et_stats; j++) {
1813			et_entry = pair * half_num_et_stats + j;
1814			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1815
1816			if (data)
1817				data[et_entry] = gbe_dev->hw_stats[et_entry];
1818		}
1819	}
1820}
1821
1822static void keystone_get_ethtool_stats(struct net_device *ndev,
1823				       struct ethtool_stats *stats,
1824				       uint64_t *data)
1825{
1826	struct netcp_intf *netcp = netdev_priv(ndev);
1827	struct gbe_intf *gbe_intf;
1828	struct gbe_priv *gbe_dev;
1829
1830	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1831	if (!gbe_intf)
1832		return;
1833
1834	gbe_dev = gbe_intf->gbe_dev;
1835	spin_lock_bh(&gbe_dev->hw_stats_lock);
1836	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1837		gbe_update_stats_ver14(gbe_dev, data);
1838	else
1839		gbe_update_stats(gbe_dev, data);
1840	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1841}
1842
1843static int keystone_get_settings(struct net_device *ndev,
1844				 struct ethtool_cmd *cmd)
1845{
1846	struct netcp_intf *netcp = netdev_priv(ndev);
1847	struct phy_device *phy = ndev->phydev;
1848	struct gbe_intf *gbe_intf;
1849	int ret;
1850
1851	if (!phy)
1852		return -EINVAL;
1853
1854	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1855	if (!gbe_intf)
1856		return -EINVAL;
1857
1858	if (!gbe_intf->slave)
1859		return -EINVAL;
1860
1861	ret = phy_ethtool_gset(phy, cmd);
1862	if (!ret)
1863		cmd->port = gbe_intf->slave->phy_port_t;
1864
1865	return ret;
1866}
1867
1868static int keystone_set_settings(struct net_device *ndev,
1869				 struct ethtool_cmd *cmd)
1870{
1871	struct netcp_intf *netcp = netdev_priv(ndev);
1872	struct phy_device *phy = ndev->phydev;
1873	struct gbe_intf *gbe_intf;
1874	u32 features = cmd->advertising & cmd->supported;
 
 
 
 
 
 
 
 
1875
1876	if (!phy)
1877		return -EINVAL;
1878
1879	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1880	if (!gbe_intf)
1881		return -EINVAL;
1882
1883	if (!gbe_intf->slave)
1884		return -EINVAL;
1885
1886	if (cmd->port != gbe_intf->slave->phy_port_t) {
1887		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
1888			return -EINVAL;
1889
1890		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
1891			return -EINVAL;
1892
1893		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
1894			return -EINVAL;
1895
1896		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
1897			return -EINVAL;
1898
1899		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1900			return -EINVAL;
1901	}
1902
1903	gbe_intf->slave->phy_port_t = cmd->port;
1904	return phy_ethtool_sset(phy, cmd);
1905}
1906
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1907static const struct ethtool_ops keystone_ethtool_ops = {
1908	.get_drvinfo		= keystone_get_drvinfo,
1909	.get_link		= ethtool_op_get_link,
1910	.get_msglevel		= keystone_get_msglevel,
1911	.set_msglevel		= keystone_set_msglevel,
1912	.get_strings		= keystone_get_stat_strings,
1913	.get_sset_count		= keystone_get_sset_count,
1914	.get_ethtool_stats	= keystone_get_ethtool_stats,
1915	.get_settings		= keystone_get_settings,
1916	.set_settings		= keystone_set_settings,
 
1917};
1918
1919#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
1920			 ((mac)[2] << 16) | ((mac)[3] << 24))
1921#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
1922
1923static void gbe_set_slave_mac(struct gbe_slave *slave,
1924			      struct gbe_intf *gbe_intf)
1925{
1926	struct net_device *ndev = gbe_intf->ndev;
1927
1928	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1929	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1930}
1931
1932static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1933{
1934	if (priv->host_port == 0)
1935		return slave_num + 1;
1936
1937	return slave_num;
1938}
1939
1940static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1941					  struct net_device *ndev,
1942					  struct gbe_slave *slave,
1943					  int up)
1944{
1945	struct phy_device *phy = slave->phy;
1946	u32 mac_control = 0;
1947
1948	if (up) {
1949		mac_control = slave->mac_control;
1950		if (phy && (phy->speed == SPEED_1000)) {
1951			mac_control |= MACSL_GIG_MODE;
1952			mac_control &= ~MACSL_XGIG_MODE;
1953		} else if (phy && (phy->speed == SPEED_10000)) {
1954			mac_control |= MACSL_XGIG_MODE;
1955			mac_control &= ~MACSL_GIG_MODE;
1956		}
1957
1958		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1959						 mac_control));
1960
1961		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1962				     ALE_PORT_STATE,
1963				     ALE_PORT_STATE_FORWARD);
1964
1965		if (ndev && slave->open &&
1966		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1967		    slave->link_interface != XGMII_LINK_MAC_PHY)
1968			netif_carrier_on(ndev);
1969	} else {
1970		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1971						 mac_control));
1972		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1973				     ALE_PORT_STATE,
1974				     ALE_PORT_STATE_DISABLE);
1975		if (ndev &&
1976		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1977		    slave->link_interface != XGMII_LINK_MAC_PHY)
1978			netif_carrier_off(ndev);
1979	}
1980
1981	if (phy)
1982		phy_print_status(phy);
1983}
1984
1985static bool gbe_phy_link_status(struct gbe_slave *slave)
1986{
1987	 return !slave->phy || slave->phy->link;
1988}
1989
1990static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1991					  struct gbe_slave *slave,
1992					  struct net_device *ndev)
1993{
1994	int sp = slave->slave_num;
1995	int phy_link_state, sgmii_link_state = 1, link_state;
1996
1997	if (!slave->open)
1998		return;
1999
2000	if (!SLAVE_LINK_IS_XGMII(slave)) {
2001		sgmii_link_state =
2002			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2003	}
2004
2005	phy_link_state = gbe_phy_link_status(slave);
2006	link_state = phy_link_state & sgmii_link_state;
2007
2008	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2009		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2010					      link_state);
2011}
2012
2013static void xgbe_adjust_link(struct net_device *ndev)
2014{
2015	struct netcp_intf *netcp = netdev_priv(ndev);
2016	struct gbe_intf *gbe_intf;
2017
2018	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2019	if (!gbe_intf)
2020		return;
2021
2022	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2023				      ndev);
2024}
2025
2026static void gbe_adjust_link(struct net_device *ndev)
2027{
2028	struct netcp_intf *netcp = netdev_priv(ndev);
2029	struct gbe_intf *gbe_intf;
2030
2031	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2032	if (!gbe_intf)
2033		return;
2034
2035	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2036				      ndev);
2037}
2038
2039static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2040{
2041	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2042	struct gbe_slave *slave;
2043
2044	for_each_sec_slave(slave, gbe_dev)
2045		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2046}
2047
2048/* Reset EMAC
2049 * Soft reset is set and polled until clear, or until a timeout occurs
2050 */
2051static int gbe_port_reset(struct gbe_slave *slave)
2052{
2053	u32 i, v;
2054
2055	/* Set the soft reset bit */
2056	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2057
2058	/* Wait for the bit to clear */
2059	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2060		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2061		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2062			return 0;
2063	}
2064
2065	/* Timeout on the reset */
2066	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2067}
2068
2069/* Configure EMAC */
2070static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2071			    int max_rx_len)
2072{
2073	void __iomem *rx_maxlen_reg;
2074	u32 xgmii_mode;
2075
2076	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2077		max_rx_len = NETCP_MAX_FRAME_SIZE;
2078
2079	/* Enable correct MII mode at SS level */
2080	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
2081	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2082		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2083		xgmii_mode |= (1 << slave->slave_num);
2084		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2085	}
2086
2087	if (IS_SS_ID_MU(gbe_dev))
2088		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2089	else
2090		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2091
2092	writel(max_rx_len, rx_maxlen_reg);
2093	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2094}
2095
2096static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2097			      struct gbe_slave *slave, bool set)
2098{
2099	if (SLAVE_LINK_IS_XGMII(slave))
2100		return;
2101
2102	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2103			    slave->slave_num, set);
2104}
2105
2106static void gbe_slave_stop(struct gbe_intf *intf)
2107{
2108	struct gbe_priv *gbe_dev = intf->gbe_dev;
2109	struct gbe_slave *slave = intf->slave;
2110
2111	gbe_sgmii_rtreset(gbe_dev, slave, true);
2112	gbe_port_reset(slave);
2113	/* Disable forwarding */
2114	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2115			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2116	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2117			   1 << slave->port_num, 0, 0);
2118
2119	if (!slave->phy)
2120		return;
2121
2122	phy_stop(slave->phy);
2123	phy_disconnect(slave->phy);
2124	slave->phy = NULL;
2125}
2126
2127static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2128{
2129	if (SLAVE_LINK_IS_XGMII(slave))
2130		return;
2131
2132	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2133	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2134			   slave->link_interface);
2135}
2136
2137static int gbe_slave_open(struct gbe_intf *gbe_intf)
2138{
2139	struct gbe_priv *priv = gbe_intf->gbe_dev;
2140	struct gbe_slave *slave = gbe_intf->slave;
2141	phy_interface_t phy_mode;
2142	bool has_phy = false;
2143
2144	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2145
2146	gbe_sgmii_config(priv, slave);
2147	gbe_port_reset(slave);
2148	gbe_sgmii_rtreset(priv, slave, false);
2149	gbe_port_config(priv, slave, priv->rx_packet_max);
2150	gbe_set_slave_mac(slave, gbe_intf);
2151	/* enable forwarding */
2152	cpsw_ale_control_set(priv->ale, slave->port_num,
2153			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2154	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2155			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2156
2157	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2158		has_phy = true;
2159		phy_mode = PHY_INTERFACE_MODE_SGMII;
2160		slave->phy_port_t = PORT_MII;
2161	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2162		has_phy = true;
2163		phy_mode = PHY_INTERFACE_MODE_NA;
2164		slave->phy_port_t = PORT_FIBRE;
2165	}
2166
2167	if (has_phy) {
2168		if (priv->ss_version == XGBE_SS_VERSION_10)
2169			hndlr = xgbe_adjust_link;
2170
2171		slave->phy = of_phy_connect(gbe_intf->ndev,
2172					    slave->phy_node,
2173					    hndlr, 0,
2174					    phy_mode);
2175		if (!slave->phy) {
2176			dev_err(priv->dev, "phy not found on slave %d\n",
2177				slave->slave_num);
2178			return -ENODEV;
2179		}
2180		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2181			phydev_name(slave->phy));
2182		phy_start(slave->phy);
2183		phy_read_status(slave->phy);
2184	}
2185	return 0;
2186}
2187
2188static void gbe_init_host_port(struct gbe_priv *priv)
2189{
2190	int bypass_en = 1;
2191
2192	/* Host Tx Pri */
2193	if (IS_SS_ID_NU(priv))
2194		writel(HOST_TX_PRI_MAP_DEFAULT,
2195		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2196
2197	/* Max length register */
2198	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2199						  rx_maxlen));
2200
2201	cpsw_ale_start(priv->ale);
2202
2203	if (priv->enable_ale)
2204		bypass_en = 0;
2205
2206	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2207
2208	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2209
2210	cpsw_ale_control_set(priv->ale, priv->host_port,
2211			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2212
2213	cpsw_ale_control_set(priv->ale, 0,
2214			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2215			     GBE_PORT_MASK(priv->ale_ports));
2216
2217	cpsw_ale_control_set(priv->ale, 0,
2218			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2219			     GBE_PORT_MASK(priv->ale_ports - 1));
2220
2221	cpsw_ale_control_set(priv->ale, 0,
2222			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2223			     GBE_PORT_MASK(priv->ale_ports));
2224
2225	cpsw_ale_control_set(priv->ale, 0,
2226			     ALE_PORT_UNTAGGED_EGRESS,
2227			     GBE_PORT_MASK(priv->ale_ports));
2228}
2229
2230static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2231{
2232	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2233	u16 vlan_id;
2234
2235	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2236			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2237			   ALE_MCAST_FWD_2);
2238	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2239		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2240				   GBE_PORT_MASK(gbe_dev->ale_ports),
2241				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2242	}
2243}
2244
2245static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2246{
2247	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2248	u16 vlan_id;
2249
2250	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2251
2252	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2253		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2254				   ALE_VLAN, vlan_id);
2255}
2256
2257static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2258{
2259	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2260	u16 vlan_id;
2261
2262	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2263
2264	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2265		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2266	}
2267}
2268
2269static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2270{
2271	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2272	u16 vlan_id;
2273
2274	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2275
2276	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2277		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2278				   ALE_VLAN, vlan_id);
2279	}
2280}
2281
2282static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2283{
2284	struct gbe_intf *gbe_intf = intf_priv;
2285	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2286
2287	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2288		naddr->addr, naddr->type);
2289
2290	switch (naddr->type) {
2291	case ADDR_MCAST:
2292	case ADDR_BCAST:
2293		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2294		break;
2295	case ADDR_UCAST:
2296	case ADDR_DEV:
2297		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2298		break;
2299	case ADDR_ANY:
2300		/* nothing to do for promiscuous */
2301	default:
2302		break;
2303	}
2304
2305	return 0;
2306}
2307
2308static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2309{
2310	struct gbe_intf *gbe_intf = intf_priv;
2311	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2312
2313	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2314		naddr->addr, naddr->type);
2315
2316	switch (naddr->type) {
2317	case ADDR_MCAST:
2318	case ADDR_BCAST:
2319		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2320		break;
2321	case ADDR_UCAST:
2322	case ADDR_DEV:
2323		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2324		break;
2325	case ADDR_ANY:
2326		/* nothing to do for promiscuous */
2327	default:
2328		break;
2329	}
2330
2331	return 0;
2332}
2333
2334static int gbe_add_vid(void *intf_priv, int vid)
2335{
2336	struct gbe_intf *gbe_intf = intf_priv;
2337	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2338
2339	set_bit(vid, gbe_intf->active_vlans);
2340
2341	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2342			  GBE_PORT_MASK(gbe_dev->ale_ports),
2343			  GBE_MASK_NO_PORTS,
2344			  GBE_PORT_MASK(gbe_dev->ale_ports),
2345			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2346
2347	return 0;
2348}
2349
2350static int gbe_del_vid(void *intf_priv, int vid)
2351{
2352	struct gbe_intf *gbe_intf = intf_priv;
2353	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2354
2355	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2356	clear_bit(vid, gbe_intf->active_vlans);
2357	return 0;
2358}
2359
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2360static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2361{
2362	struct gbe_intf *gbe_intf = intf_priv;
2363	struct phy_device *phy = gbe_intf->slave->phy;
2364	int ret = -EOPNOTSUPP;
 
 
 
 
 
 
 
 
2365
2366	if (phy)
2367		ret = phy_mii_ioctl(phy, req, cmd);
2368
2369	return ret;
2370}
2371
2372static void netcp_ethss_timer(unsigned long arg)
2373{
2374	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2375	struct gbe_intf *gbe_intf;
2376	struct gbe_slave *slave;
2377
2378	/* Check & update SGMII link state of interfaces */
2379	for_each_intf(gbe_intf, gbe_dev) {
2380		if (!gbe_intf->slave->open)
2381			continue;
2382		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2383					      gbe_intf->ndev);
2384	}
2385
2386	/* Check & update SGMII link state of secondary ports */
2387	for_each_sec_slave(slave, gbe_dev) {
2388		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2389	}
2390
2391	/* A timer runs as a BH, no need to block them */
2392	spin_lock(&gbe_dev->hw_stats_lock);
2393
2394	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2395		gbe_update_stats_ver14(gbe_dev, NULL);
2396	else
2397		gbe_update_stats(gbe_dev, NULL);
2398
2399	spin_unlock(&gbe_dev->hw_stats_lock);
2400
2401	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2402	add_timer(&gbe_dev->timer);
2403}
2404
2405static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
2406{
2407	struct gbe_intf *gbe_intf = data;
2408
2409	p_info->tx_pipe = &gbe_intf->tx_pipe;
2410	return 0;
 
 
 
 
 
 
 
 
2411}
2412
2413static int gbe_open(void *intf_priv, struct net_device *ndev)
2414{
2415	struct gbe_intf *gbe_intf = intf_priv;
2416	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2417	struct netcp_intf *netcp = netdev_priv(ndev);
2418	struct gbe_slave *slave = gbe_intf->slave;
2419	int port_num = slave->port_num;
2420	u32 reg;
2421	int ret;
2422
2423	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2424	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2425		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2426		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2427
2428	/* For 10G and on NetCP 1.5, use directed to port */
2429	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2430		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2431
2432	if (gbe_dev->enable_ale)
2433		gbe_intf->tx_pipe.switch_to_port = 0;
2434	else
2435		gbe_intf->tx_pipe.switch_to_port = port_num;
2436
2437	dev_dbg(gbe_dev->dev,
2438		"opened TX channel %s: %p with to port %d, flags %d\n",
2439		gbe_intf->tx_pipe.dma_chan_name,
2440		gbe_intf->tx_pipe.dma_channel,
2441		gbe_intf->tx_pipe.switch_to_port,
2442		gbe_intf->tx_pipe.flags);
2443
2444	gbe_slave_stop(gbe_intf);
2445
2446	/* disable priority elevation and enable statistics on all ports */
2447	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2448
2449	/* Control register */
2450	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
 
 
 
 
 
2451
2452	/* All statistics enabled and STAT AB visible by default */
2453	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2454						    stat_port_en));
2455
2456	ret = gbe_slave_open(gbe_intf);
2457	if (ret)
2458		goto fail;
2459
2460	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2461			      gbe_intf);
2462
2463	slave->open = true;
2464	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
 
 
 
2465	return 0;
2466
2467fail:
2468	gbe_slave_stop(gbe_intf);
2469	return ret;
2470}
2471
2472static int gbe_close(void *intf_priv, struct net_device *ndev)
2473{
2474	struct gbe_intf *gbe_intf = intf_priv;
2475	struct netcp_intf *netcp = netdev_priv(ndev);
 
 
 
2476
2477	gbe_slave_stop(gbe_intf);
2478	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2479				gbe_intf);
 
2480
2481	gbe_intf->slave->open = false;
2482	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2483	return 0;
2484}
2485
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2486static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2487		      struct device_node *node)
2488{
2489	int port_reg_num;
2490	u32 port_reg_ofs, emac_reg_ofs;
2491	u32 port_reg_blk_sz, emac_reg_blk_sz;
2492
2493	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2494		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2495		return -EINVAL;
2496	}
2497
2498	if (of_property_read_u32(node, "link-interface",
2499				 &slave->link_interface)) {
2500		dev_warn(gbe_dev->dev,
2501			 "missing link-interface value defaulting to 1G mac-phy link\n");
2502		slave->link_interface = SGMII_LINK_MAC_PHY;
2503	}
2504
2505	slave->open = false;
2506	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
 
 
2507	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2508
2509	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2510		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2511	else
2512		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2513
2514	/* Emac regs memmap are contiguous but port regs are not */
2515	port_reg_num = slave->slave_num;
2516	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2517		if (slave->slave_num > 1) {
2518			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2519			port_reg_num -= 2;
2520		} else {
2521			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2522		}
2523		emac_reg_ofs = GBE13_EMAC_OFFSET;
2524		port_reg_blk_sz = 0x30;
2525		emac_reg_blk_sz = 0x40;
2526	} else if (IS_SS_ID_MU(gbe_dev)) {
2527		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2528		emac_reg_ofs = GBENU_EMAC_OFFSET;
2529		port_reg_blk_sz = 0x1000;
2530		emac_reg_blk_sz = 0x1000;
2531	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2532		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2533		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2534		port_reg_blk_sz = 0x30;
2535		emac_reg_blk_sz = 0x40;
2536	} else {
2537		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2538			gbe_dev->ss_version);
2539		return -EINVAL;
2540	}
2541
2542	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2543				(port_reg_blk_sz * port_reg_num);
2544	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2545				(emac_reg_blk_sz * slave->slave_num);
2546
2547	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2548		/* Initialize  slave port register offsets */
2549		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2550		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2551		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2552		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2553		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2554		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2555		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2556		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2557		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2558
2559		/* Initialize EMAC register offsets */
2560		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2561		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2562		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2563
2564	} else if (IS_SS_ID_MU(gbe_dev)) {
2565		/* Initialize  slave port register offsets */
2566		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2567		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2568		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2569		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2570		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2571		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2572		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2573		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2574		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2575		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2576
2577		/* Initialize EMAC register offsets */
2578		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2579		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2580
2581	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2582		/* Initialize  slave port register offsets */
2583		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2584		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2585		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2586		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2587		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2588		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2589		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2590		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2591		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2592
2593		/* Initialize EMAC register offsets */
2594		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
2595		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2596		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2597	}
2598
2599	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
 
 
2600	return 0;
2601}
2602
2603static void init_secondary_ports(struct gbe_priv *gbe_dev,
2604				 struct device_node *node)
2605{
2606	struct device *dev = gbe_dev->dev;
2607	phy_interface_t phy_mode;
2608	struct gbe_priv **priv;
2609	struct device_node *port;
2610	struct gbe_slave *slave;
2611	bool mac_phy_link = false;
2612
2613	for_each_child_of_node(node, port) {
2614		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
2615		if (!slave) {
2616			dev_err(dev,
2617				"memomry alloc failed for secondary port(%s), skipping...\n",
2618				port->name);
2619			continue;
2620		}
2621
2622		if (init_slave(gbe_dev, slave, port)) {
2623			dev_err(dev,
2624				"Failed to initialize secondary port(%s), skipping...\n",
2625				port->name);
2626			devm_kfree(dev, slave);
2627			continue;
2628		}
2629
2630		gbe_sgmii_config(gbe_dev, slave);
2631		gbe_port_reset(slave);
2632		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
2633		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
2634		gbe_dev->num_slaves++;
2635		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2636		    (slave->link_interface == XGMII_LINK_MAC_PHY))
2637			mac_phy_link = true;
2638
2639		slave->open = true;
2640		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
2641			of_node_put(port);
2642			break;
2643		}
2644	}
2645
2646	/* of_phy_connect() is needed only for MAC-PHY interface */
2647	if (!mac_phy_link)
2648		return;
2649
2650	/* Allocate dummy netdev device for attaching to phy device */
2651	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
2652					NET_NAME_UNKNOWN, ether_setup);
2653	if (!gbe_dev->dummy_ndev) {
2654		dev_err(dev,
2655			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
2656		return;
2657	}
2658	priv = netdev_priv(gbe_dev->dummy_ndev);
2659	*priv = gbe_dev;
2660
2661	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2662		phy_mode = PHY_INTERFACE_MODE_SGMII;
2663		slave->phy_port_t = PORT_MII;
2664	} else {
2665		phy_mode = PHY_INTERFACE_MODE_NA;
2666		slave->phy_port_t = PORT_FIBRE;
2667	}
2668
2669	for_each_sec_slave(slave, gbe_dev) {
2670		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2671		    (slave->link_interface != XGMII_LINK_MAC_PHY))
2672			continue;
2673		slave->phy =
2674			of_phy_connect(gbe_dev->dummy_ndev,
2675				       slave->phy_node,
2676				       gbe_adjust_link_sec_slaves,
2677				       0, phy_mode);
2678		if (!slave->phy) {
2679			dev_err(dev, "phy not found for slave %d\n",
2680				slave->slave_num);
2681			slave->phy = NULL;
2682		} else {
2683			dev_dbg(dev, "phy found: id is: 0x%s\n",
2684				phydev_name(slave->phy));
2685			phy_start(slave->phy);
2686			phy_read_status(slave->phy);
2687		}
2688	}
2689}
2690
2691static void free_secondary_ports(struct gbe_priv *gbe_dev)
2692{
2693	struct gbe_slave *slave;
2694
2695	while (!list_empty(&gbe_dev->secondary_slaves)) {
2696		slave = first_sec_slave(gbe_dev);
2697
2698		if (slave->phy)
2699			phy_disconnect(slave->phy);
2700		list_del(&slave->slave_list);
2701	}
2702	if (gbe_dev->dummy_ndev)
2703		free_netdev(gbe_dev->dummy_ndev);
2704}
2705
2706static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
2707				 struct device_node *node)
2708{
2709	struct resource res;
2710	void __iomem *regs;
2711	int ret, i;
2712
2713	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
2714	if (ret) {
2715		dev_err(gbe_dev->dev,
2716			"Can't xlate xgbe of node(%s) ss address at %d\n",
2717			node->name, XGBE_SS_REG_INDEX);
2718		return ret;
2719	}
2720
2721	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2722	if (IS_ERR(regs)) {
2723		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
2724		return PTR_ERR(regs);
2725	}
2726	gbe_dev->ss_regs = regs;
2727
2728	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
2729	if (ret) {
2730		dev_err(gbe_dev->dev,
2731			"Can't xlate xgbe of node(%s) sm address at %d\n",
2732			node->name, XGBE_SM_REG_INDEX);
2733		return ret;
2734	}
2735
2736	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2737	if (IS_ERR(regs)) {
2738		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
2739		return PTR_ERR(regs);
2740	}
2741	gbe_dev->switch_regs = regs;
2742
2743	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
2744	if (ret) {
2745		dev_err(gbe_dev->dev,
2746			"Can't xlate xgbe serdes of node(%s) address at %d\n",
2747			node->name, XGBE_SERDES_REG_INDEX);
2748		return ret;
2749	}
2750
2751	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2752	if (IS_ERR(regs)) {
2753		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
2754		return PTR_ERR(regs);
2755	}
2756	gbe_dev->xgbe_serdes_regs = regs;
2757
2758	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
2759	gbe_dev->et_stats = xgbe10_et_stats;
2760	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
2761
2762	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2763					 gbe_dev->num_et_stats * sizeof(u64),
2764					 GFP_KERNEL);
2765	if (!gbe_dev->hw_stats) {
2766		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2767		return -ENOMEM;
2768	}
2769
2770	gbe_dev->hw_stats_prev =
2771		devm_kzalloc(gbe_dev->dev,
2772			     gbe_dev->num_et_stats * sizeof(u32),
2773			     GFP_KERNEL);
2774	if (!gbe_dev->hw_stats_prev) {
2775		dev_err(gbe_dev->dev,
2776			"hw_stats_prev memory allocation failed\n");
2777		return -ENOMEM;
2778	}
2779
2780	gbe_dev->ss_version = XGBE_SS_VERSION_10;
2781	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
2782					XGBE10_SGMII_MODULE_OFFSET;
2783	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
2784
2785	for (i = 0; i < gbe_dev->max_num_ports; i++)
2786		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2787			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
2788
2789	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
 
2790	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2791	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
2792	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
2793	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2794
2795	/* Subsystem registers */
2796	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2797	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
2798
2799	/* Switch module registers */
2800	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2801	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2802	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2803	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2804	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2805
2806	/* Host port registers */
2807	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2808	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2809	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2810	return 0;
2811}
2812
2813static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
2814				    struct device_node *node)
2815{
2816	struct resource res;
2817	void __iomem *regs;
2818	int ret;
2819
2820	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
2821	if (ret) {
2822		dev_err(gbe_dev->dev,
2823			"Can't translate of node(%s) of gbe ss address at %d\n",
2824			node->name, GBE_SS_REG_INDEX);
2825		return ret;
2826	}
2827
2828	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2829	if (IS_ERR(regs)) {
2830		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
2831		return PTR_ERR(regs);
2832	}
2833	gbe_dev->ss_regs = regs;
2834	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
2835	return 0;
2836}
2837
2838static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
2839				struct device_node *node)
2840{
2841	struct resource res;
2842	void __iomem *regs;
2843	int i, ret;
2844
2845	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
2846	if (ret) {
2847		dev_err(gbe_dev->dev,
2848			"Can't translate of gbe node(%s) address at index %d\n",
2849			node->name, GBE_SGMII34_REG_INDEX);
2850		return ret;
2851	}
2852
2853	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2854	if (IS_ERR(regs)) {
2855		dev_err(gbe_dev->dev,
2856			"Failed to map gbe sgmii port34 register base\n");
2857		return PTR_ERR(regs);
2858	}
2859	gbe_dev->sgmii_port34_regs = regs;
2860
2861	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
2862	if (ret) {
2863		dev_err(gbe_dev->dev,
2864			"Can't translate of gbe node(%s) address at index %d\n",
2865			node->name, GBE_SM_REG_INDEX);
2866		return ret;
2867	}
2868
2869	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2870	if (IS_ERR(regs)) {
2871		dev_err(gbe_dev->dev,
2872			"Failed to map gbe switch module register base\n");
2873		return PTR_ERR(regs);
2874	}
2875	gbe_dev->switch_regs = regs;
2876
2877	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
2878	gbe_dev->et_stats = gbe13_et_stats;
2879	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
2880
2881	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2882					 gbe_dev->num_et_stats * sizeof(u64),
2883					 GFP_KERNEL);
2884	if (!gbe_dev->hw_stats) {
2885		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2886		return -ENOMEM;
2887	}
2888
2889	gbe_dev->hw_stats_prev =
2890		devm_kzalloc(gbe_dev->dev,
2891			     gbe_dev->num_et_stats * sizeof(u32),
2892			     GFP_KERNEL);
2893	if (!gbe_dev->hw_stats_prev) {
2894		dev_err(gbe_dev->dev,
2895			"hw_stats_prev memory allocation failed\n");
2896		return -ENOMEM;
2897	}
2898
2899	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
2900	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
2901
2902	/* K2HK has only 2 hw stats modules visible at a time, so
2903	 * module 0 & 2 points to one base and
2904	 * module 1 & 3 points to the other base
2905	 */
2906	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
2907		gbe_dev->hw_stats_regs[i] =
2908			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
2909			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
2910	}
2911
 
2912	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
2913	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2914	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
2915	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2916	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
2917
2918	/* Subsystem registers */
2919	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2920
2921	/* Switch module registers */
2922	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2923	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2924	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
2925	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2926	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2927	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2928
2929	/* Host port registers */
2930	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2931	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2932	return 0;
2933}
2934
2935static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2936				struct device_node *node)
2937{
2938	struct resource res;
2939	void __iomem *regs;
2940	int i, ret;
2941
2942	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
2943	gbe_dev->et_stats = gbenu_et_stats;
2944
2945	if (IS_SS_ID_NU(gbe_dev))
2946		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2947			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
2948	else
2949		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2950					GBENU_ET_STATS_PORT_SIZE;
2951
2952	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2953					 gbe_dev->num_et_stats * sizeof(u64),
2954					 GFP_KERNEL);
2955	if (!gbe_dev->hw_stats) {
2956		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2957		return -ENOMEM;
2958	}
2959
2960	gbe_dev->hw_stats_prev =
2961		devm_kzalloc(gbe_dev->dev,
2962			     gbe_dev->num_et_stats * sizeof(u32),
2963			     GFP_KERNEL);
2964	if (!gbe_dev->hw_stats_prev) {
2965		dev_err(gbe_dev->dev,
2966			"hw_stats_prev memory allocation failed\n");
2967		return -ENOMEM;
2968	}
2969
2970	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
2971	if (ret) {
2972		dev_err(gbe_dev->dev,
2973			"Can't translate of gbenu node(%s) addr at index %d\n",
2974			node->name, GBENU_SM_REG_INDEX);
2975		return ret;
2976	}
2977
2978	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2979	if (IS_ERR(regs)) {
2980		dev_err(gbe_dev->dev,
2981			"Failed to map gbenu switch module register base\n");
2982		return PTR_ERR(regs);
2983	}
2984	gbe_dev->switch_regs = regs;
2985
2986	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2987
2988	/* Although sgmii modules are mem mapped to one contiguous
2989	 * region on GBENU devices, setting sgmii_port34_regs allows
2990	 * consistent code when accessing sgmii api
2991	 */
2992	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
2993				     (2 * GBENU_SGMII_MODULE_SIZE);
2994
2995	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
2996
2997	for (i = 0; i < (gbe_dev->max_num_ports); i++)
2998		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2999			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3000
 
3001	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3002	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3003	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3004	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3005	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3006
3007	/* Subsystem registers */
3008	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3009
3010	/* Switch module registers */
3011	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3012	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3013	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3014	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3015
3016	/* Host port registers */
3017	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3018	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3019
3020	/* For NU only.  2U does not need tx_pri_map.
3021	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3022	 * while 2U has only 1 such thread
3023	 */
3024	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3025	return 0;
3026}
3027
3028static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3029		     struct device_node *node, void **inst_priv)
3030{
3031	struct device_node *interfaces, *interface;
3032	struct device_node *secondary_ports;
3033	struct cpsw_ale_params ale_params;
3034	struct gbe_priv *gbe_dev;
3035	u32 slave_num;
3036	int i, ret = 0;
3037
3038	if (!node) {
3039		dev_err(dev, "device tree info unavailable\n");
3040		return -ENODEV;
3041	}
3042
3043	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3044	if (!gbe_dev)
3045		return -ENOMEM;
3046
3047	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3048	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3049		gbe_dev->max_num_slaves = 4;
3050	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3051		gbe_dev->max_num_slaves = 8;
3052	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3053		gbe_dev->max_num_slaves = 1;
3054	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3055		gbe_dev->max_num_slaves = 2;
3056	} else {
3057		dev_err(dev, "device tree node for unknown device\n");
3058		return -EINVAL;
3059	}
3060	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3061
3062	gbe_dev->dev = dev;
3063	gbe_dev->netcp_device = netcp_device;
3064	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3065
3066	/* init the hw stats lock */
3067	spin_lock_init(&gbe_dev->hw_stats_lock);
3068
3069	if (of_find_property(node, "enable-ale", NULL)) {
3070		gbe_dev->enable_ale = true;
3071		dev_info(dev, "ALE enabled\n");
3072	} else {
3073		gbe_dev->enable_ale = false;
3074		dev_dbg(dev, "ALE bypass enabled*\n");
3075	}
3076
3077	ret = of_property_read_u32(node, "tx-queue",
3078				   &gbe_dev->tx_queue_id);
3079	if (ret < 0) {
3080		dev_err(dev, "missing tx_queue parameter\n");
3081		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3082	}
3083
3084	ret = of_property_read_string(node, "tx-channel",
3085				      &gbe_dev->dma_chan_name);
3086	if (ret < 0) {
3087		dev_err(dev, "missing \"tx-channel\" parameter\n");
3088		return -EINVAL;
3089	}
3090
3091	if (!strcmp(node->name, "gbe")) {
3092		ret = get_gbe_resource_version(gbe_dev, node);
3093		if (ret)
3094			return ret;
3095
3096		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3097
3098		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3099			ret = set_gbe_ethss14_priv(gbe_dev, node);
3100		else if (IS_SS_ID_MU(gbe_dev))
3101			ret = set_gbenu_ethss_priv(gbe_dev, node);
3102		else
3103			ret = -ENODEV;
3104
3105	} else if (!strcmp(node->name, "xgbe")) {
3106		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3107		if (ret)
3108			return ret;
3109		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3110					     gbe_dev->ss_regs);
3111	} else {
3112		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3113		ret = -ENODEV;
3114	}
3115
3116	if (ret)
3117		return ret;
3118
3119	interfaces = of_get_child_by_name(node, "interfaces");
3120	if (!interfaces)
3121		dev_err(dev, "could not find interfaces\n");
3122
3123	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3124				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3125	if (ret)
3126		return ret;
3127
3128	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3129	if (ret)
3130		return ret;
3131
3132	/* Create network interfaces */
3133	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3134	for_each_child_of_node(interfaces, interface) {
3135		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3136		if (ret) {
3137			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3138				interface->name);
3139			continue;
3140		}
3141		gbe_dev->num_slaves++;
3142		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3143			of_node_put(interface);
3144			break;
3145		}
3146	}
3147	of_node_put(interfaces);
3148
3149	if (!gbe_dev->num_slaves)
3150		dev_warn(dev, "No network interface configured\n");
3151
3152	/* Initialize Secondary slave ports */
3153	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3154	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3155	if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3156		init_secondary_ports(gbe_dev, secondary_ports);
3157	of_node_put(secondary_ports);
3158
3159	if (!gbe_dev->num_slaves) {
3160		dev_err(dev,
3161			"No network interface or secondary ports configured\n");
3162		ret = -ENODEV;
3163		goto free_sec_ports;
3164	}
3165
3166	memset(&ale_params, 0, sizeof(ale_params));
3167	ale_params.dev		= gbe_dev->dev;
3168	ale_params.ale_regs	= gbe_dev->ale_reg;
3169	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3170	ale_params.ale_entries	= gbe_dev->ale_entries;
3171	ale_params.ale_ports	= gbe_dev->ale_ports;
3172
 
 
 
3173	gbe_dev->ale = cpsw_ale_create(&ale_params);
3174	if (!gbe_dev->ale) {
3175		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3176		ret = -ENODEV;
3177		goto free_sec_ports;
3178	} else {
3179		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3180	}
3181
 
 
 
 
 
 
3182	/* initialize host port */
3183	gbe_init_host_port(gbe_dev);
3184
3185	spin_lock_bh(&gbe_dev->hw_stats_lock);
3186	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3187		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3188			gbe_reset_mod_stats_ver14(gbe_dev, i);
3189		else
3190			gbe_reset_mod_stats(gbe_dev, i);
3191	}
3192	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3193
3194	init_timer(&gbe_dev->timer);
3195	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
3196	gbe_dev->timer.function = netcp_ethss_timer;
3197	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3198	add_timer(&gbe_dev->timer);
3199	*inst_priv = gbe_dev;
3200	return 0;
3201
3202free_sec_ports:
3203	free_secondary_ports(gbe_dev);
3204	return ret;
3205}
3206
3207static int gbe_attach(void *inst_priv, struct net_device *ndev,
3208		      struct device_node *node, void **intf_priv)
3209{
3210	struct gbe_priv *gbe_dev = inst_priv;
3211	struct gbe_intf *gbe_intf;
3212	int ret;
3213
3214	if (!node) {
3215		dev_err(gbe_dev->dev, "interface node not available\n");
3216		return -ENODEV;
3217	}
3218
3219	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3220	if (!gbe_intf)
3221		return -ENOMEM;
3222
3223	gbe_intf->ndev = ndev;
3224	gbe_intf->dev = gbe_dev->dev;
3225	gbe_intf->gbe_dev = gbe_dev;
3226
3227	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3228					sizeof(*gbe_intf->slave),
3229					GFP_KERNEL);
3230	if (!gbe_intf->slave) {
3231		ret = -ENOMEM;
3232		goto fail;
3233	}
3234
3235	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3236		ret = -ENODEV;
3237		goto fail;
3238	}
3239
3240	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3241	ndev->ethtool_ops = &keystone_ethtool_ops;
3242	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3243	*intf_priv = gbe_intf;
3244	return 0;
3245
3246fail:
3247	if (gbe_intf->slave)
3248		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3249	if (gbe_intf)
3250		devm_kfree(gbe_dev->dev, gbe_intf);
3251	return ret;
3252}
3253
3254static int gbe_release(void *intf_priv)
3255{
3256	struct gbe_intf *gbe_intf = intf_priv;
3257
3258	gbe_intf->ndev->ethtool_ops = NULL;
3259	list_del(&gbe_intf->gbe_intf_list);
3260	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3261	devm_kfree(gbe_intf->dev, gbe_intf);
3262	return 0;
3263}
3264
3265static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3266{
3267	struct gbe_priv *gbe_dev = inst_priv;
3268
3269	del_timer_sync(&gbe_dev->timer);
 
3270	cpsw_ale_stop(gbe_dev->ale);
3271	cpsw_ale_destroy(gbe_dev->ale);
3272	netcp_txpipe_close(&gbe_dev->tx_pipe);
3273	free_secondary_ports(gbe_dev);
3274
3275	if (!list_empty(&gbe_dev->gbe_intf_head))
3276		dev_alert(gbe_dev->dev,
3277			  "unreleased ethss interfaces present\n");
3278
3279	return 0;
3280}
3281
3282static struct netcp_module gbe_module = {
3283	.name		= GBE_MODULE_NAME,
3284	.owner		= THIS_MODULE,
3285	.primary	= true,
3286	.probe		= gbe_probe,
3287	.open		= gbe_open,
3288	.close		= gbe_close,
3289	.remove		= gbe_remove,
3290	.attach		= gbe_attach,
3291	.release	= gbe_release,
3292	.add_addr	= gbe_add_addr,
3293	.del_addr	= gbe_del_addr,
3294	.add_vid	= gbe_add_vid,
3295	.del_vid	= gbe_del_vid,
3296	.ioctl		= gbe_ioctl,
3297};
3298
3299static struct netcp_module xgbe_module = {
3300	.name		= XGBE_MODULE_NAME,
3301	.owner		= THIS_MODULE,
3302	.primary	= true,
3303	.probe		= gbe_probe,
3304	.open		= gbe_open,
3305	.close		= gbe_close,
3306	.remove		= gbe_remove,
3307	.attach		= gbe_attach,
3308	.release	= gbe_release,
3309	.add_addr	= gbe_add_addr,
3310	.del_addr	= gbe_del_addr,
3311	.add_vid	= gbe_add_vid,
3312	.del_vid	= gbe_del_vid,
3313	.ioctl		= gbe_ioctl,
3314};
3315
3316static int __init keystone_gbe_init(void)
3317{
3318	int ret;
3319
3320	ret = netcp_register_module(&gbe_module);
3321	if (ret)
3322		return ret;
3323
3324	ret = netcp_register_module(&xgbe_module);
3325	if (ret)
3326		return ret;
3327
3328	return 0;
3329}
3330module_init(keystone_gbe_init);
3331
3332static void __exit keystone_gbe_exit(void)
3333{
3334	netcp_unregister_module(&gbe_module);
3335	netcp_unregister_module(&xgbe_module);
3336}
3337module_exit(keystone_gbe_exit);
3338
3339MODULE_LICENSE("GPL v2");
3340MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3341MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");