v6.8
   1/*
   2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
   3 *
   4 * Copyright (C) 2012 Marvell
   5 *
   6 * Rami Rosen <rosenr@marvell.com>
   7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
   8 *
   9 * This file is licensed under the terms of the GNU General Public
  10 * License version 2. This program is licensed "as is" without any
  11 * warranty of any kind, whether express or implied.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/cpu.h>
  16#include <linux/etherdevice.h>
  17#include <linux/if_vlan.h>
  18#include <linux/inetdevice.h>
  19#include <linux/interrupt.h>
  20#include <linux/io.h>
  21#include <linux/kernel.h>
  22#include <linux/mbus.h>
  23#include <linux/module.h>
  24#include <linux/netdevice.h>
  25#include <linux/of.h>
  26#include <linux/of_address.h>
  27#include <linux/of_irq.h>
  28#include <linux/of_mdio.h>
  29#include <linux/of_net.h>
  30#include <linux/phy/phy.h>
  31#include <linux/phy.h>
  32#include <linux/phylink.h>
  33#include <linux/platform_device.h>
  34#include <linux/skbuff.h>
  35#include <net/hwbm.h>
  36#include "mvneta_bm.h"
  37#include <net/ip.h>
  38#include <net/ipv6.h>
  39#include <net/tso.h>
  40#include <net/page_pool/helpers.h>
  41#include <net/pkt_sched.h>
  42#include <linux/bpf_trace.h>
  43
  44/* Registers */
  45#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
  46#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
  47#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
  48#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
  49#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
  50#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
  51#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
  52#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
  53#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
  54#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
  55#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
  56#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
  57#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
  58#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
  59#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
  60#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
  61#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
  62#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
  63#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
  64#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
  65#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
  66#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
  67#define MVNETA_PORT_RX_RESET                    0x1cc0
  68#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
  69#define MVNETA_PHY_ADDR                         0x2000
  70#define      MVNETA_PHY_ADDR_MASK               0x1f
  71#define MVNETA_MBUS_RETRY                       0x2010
  72#define MVNETA_UNIT_INTR_CAUSE                  0x2080
  73#define MVNETA_UNIT_CONTROL                     0x20B0
  74#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
  75#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
  76#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
  77#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
  78#define MVNETA_BASE_ADDR_ENABLE                 0x2290
  79#define      MVNETA_AC5_CNM_DDR_TARGET		0x2
  80#define      MVNETA_AC5_CNM_DDR_ATTR		0xb
  81#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
  82#define MVNETA_PORT_CONFIG                      0x2400
  83#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  84#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  85#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  86#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  87#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  88#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  89#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  90#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
  91#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
  92						 MVNETA_DEF_RXQ_ARP(q)	 | \
  93						 MVNETA_DEF_RXQ_TCP(q)	 | \
  94						 MVNETA_DEF_RXQ_UDP(q)	 | \
  95						 MVNETA_DEF_RXQ_BPDU(q)	 | \
  96						 MVNETA_TX_UNSET_ERR_SUM | \
  97						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  98#define MVNETA_PORT_CONFIG_EXTEND                0x2404
  99#define MVNETA_MAC_ADDR_LOW                      0x2414
 100#define MVNETA_MAC_ADDR_HIGH                     0x2418
 101#define MVNETA_SDMA_CONFIG                       0x241c
 102#define      MVNETA_SDMA_BRST_SIZE_16            4
 103#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
 104#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 105#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 106#define      MVNETA_DESC_SWAP                    BIT(6)
 107#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 108#define	MVNETA_VLAN_PRIO_TO_RXQ			 0x2440
 109#define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
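/* For example (derived purely from the macro above), MVNETA_VLAN_PRIO_RXQ_MAP(5, 2)
 * evaluates to 2 << 15: three register bits per VLAN priority level, so
 * priority 5 is mapped to RX queue 2.
 */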
 110#define MVNETA_PORT_STATUS                       0x2444
 111#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 112#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 113#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 114/* Only exists on Armada XP and Armada 370 */
 115#define MVNETA_SERDES_CFG			 0x24A0
 116#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
 117#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
 118#define      MVNETA_HSGMII_SERDES_PROTO		 0x1107
 119#define MVNETA_TYPE_PRIO                         0x24bc
 120#define      MVNETA_FORCE_UNI                    BIT(21)
 121#define MVNETA_TXQ_CMD_1                         0x24e4
 122#define MVNETA_TXQ_CMD                           0x2448
 123#define      MVNETA_TXQ_DISABLE_SHIFT            8
 124#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 125#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
 126#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
 127#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 128#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 129#define MVNETA_ACC_MODE                          0x2500
 130#define MVNETA_BM_ADDRESS                        0x2504
 131#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 132#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 133#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 134#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
 135#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
 136#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 137
 138/* Exception Interrupt Port/Queue Cause register
 139 *
  140 * Their behavior depends on the mapping done using the PCPX2Q
  141 * registers. For a given CPU, if the bit associated with a queue is not
  142 * set, then a read of the register from this CPU will always return
  143 * 0 and a write won't do anything.
 144 */
 145
 146#define MVNETA_INTR_NEW_CAUSE                    0x25a0
 147#define MVNETA_INTR_NEW_MASK                     0x25a4
 148
 149/* bits  0..7  = TXQ SENT, one bit per queue.
 150 * bits  8..15 = RXQ OCCUP, one bit per queue.
 151 * bits 16..23 = RXQ FREE, one bit per queue.
 152 * bit  29 = OLD_REG_SUM, see old reg ?
 153 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 154 * bit  31 = MISC_SUM,   one bit for 4 ports
 155 */
 156#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 157#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 158#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 159#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 160#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
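/* A few values worked out from the macros above, as a sanity check of the
 * bit layout (computed from the macros themselves, not taken from the
 * datasheet):
 *
 *	MVNETA_TX_INTR_MASK(8) == 0x000000ff	(TXQ SENT, bits 0..7)
 *	MVNETA_RX_INTR_MASK(2) == 0x00000300	(RXQ OCCUP, bits 8..9)
 *	MVNETA_RX_INTR_MASK_ALL | MVNETA_TX_INTR_MASK_ALL |
 *		MVNETA_MISCINTR_INTR_MASK == 0x8000ffff
 */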
 161
 162#define MVNETA_INTR_OLD_CAUSE                    0x25a8
 163#define MVNETA_INTR_OLD_MASK                     0x25ac
 164
 165/* Data Path Port/Queue Cause Register */
 166#define MVNETA_INTR_MISC_CAUSE                   0x25b0
 167#define MVNETA_INTR_MISC_MASK                    0x25b4
 168
 169#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
 170#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
 171#define      MVNETA_CAUSE_PTP                    BIT(4)
 172
 173#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
 174#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
 175#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
 176#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
 177#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
 178#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
 179#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
 180#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
 181
 182#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
 183#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
 184#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
 185
 186#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
 187#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
 188#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
 189
 190#define MVNETA_INTR_ENABLE                       0x25b8
 191#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
 192#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 193
 194#define MVNETA_RXQ_CMD                           0x2680
 195#define      MVNETA_RXQ_DISABLE_SHIFT            8
 196#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 197#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 198#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 199#define MVNETA_GMAC_CTRL_0                       0x2c00
 200#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 201#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 202#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 203#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 204#define MVNETA_GMAC_CTRL_2                       0x2c08
 205#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 206#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 207#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 208#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 209#define MVNETA_GMAC_STATUS                       0x2c10
 210#define      MVNETA_GMAC_LINK_UP                 BIT(0)
 211#define      MVNETA_GMAC_SPEED_1000              BIT(1)
 212#define      MVNETA_GMAC_SPEED_100               BIT(2)
 213#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 214#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 215#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 216#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 217#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 218#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
 219#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 220#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 221#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 222#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 223#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 224#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
 225#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 226#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 227#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 228#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 229#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
 230#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 231#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 232#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 233#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 234#define MVNETA_GMAC_CTRL_4                       0x2c90
 235#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
 236#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 237#define      MVNETA_MIB_LATE_COLLISION           0x7c
 238#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 239#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 240#define MVNETA_DA_FILT_UCAST_BASE                0x3600
 241#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 242#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 243#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 244#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 245#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 246#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 247#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 248#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 249#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 250#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 251#define MVNETA_PORT_TX_RESET                     0x3cf0
 252#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 253#define MVNETA_TXQ_CMD1_REG			 0x3e00
 254#define      MVNETA_TXQ_CMD1_BW_LIM_SEL_V1	 BIT(3)
 255#define      MVNETA_TXQ_CMD1_BW_LIM_EN		 BIT(0)
 256#define MVNETA_REFILL_NUM_CLK_REG		 0x3e08
 257#define      MVNETA_REFILL_MAX_NUM_CLK		 0x0000ffff
 258#define MVNETA_TX_MTU                            0x3e0c
 259#define MVNETA_TX_TOKEN_SIZE                     0x3e14
 260#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 261#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		 (0x3e20 + ((q) << 2))
 262#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
 263#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
 264#define      MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	 0x0007ffff
 265#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 266#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 267
 268/* The values of the bucket refill base period and refill period are taken from
  269 * the reference manual, and add up to a base resolution of 10Kbps. This allows
  270 * covering all rate-limit values from 10Kbps up to 5Gbps.
 271 */
 272
 273/* Base period for the rate limit algorithm */
 274#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100
 275
 276/* Number of Base Period to wait between each bucket refill */
 277#define MVNETA_TXQ_BUCKET_REFILL_PERIOD	1000
 278
 279/* The base resolution for rate limiting, in bps. Any max_rate value should be
 280 * a multiple of that value.
 281 */
 282#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
 283					 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
 284					  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
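/* Working the numbers through the definitions above confirms the comment:
 *
 *	MVNETA_TXQ_RATE_LIMIT_RESOLUTION
 *		= NSEC_PER_SEC / (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS *
 *				  MVNETA_TXQ_BUCKET_REFILL_PERIOD)
 *		= 1000000000 / (100 * 1000)
 *		= 10000 bps, i.e. the 10Kbps base resolution quoted above.
 */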
 285
 286#define MVNETA_LPI_CTRL_0                        0x2cc0
 287#define MVNETA_LPI_CTRL_1                        0x2cc4
 288#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
 289#define MVNETA_LPI_CTRL_2                        0x2cc8
 290#define MVNETA_LPI_STATUS                        0x2ccc
 291
 292#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
 293
 294/* Descriptor ring Macros */
 295#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
 296	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
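/* For instance, with a ring of MVNETA_MAX_RXD (512) entries, last_desc is 511:
 * MVNETA_QUEUE_NEXT_DESC() advances 0 -> 1 -> ... -> 511 and then wraps back
 * to 0.
 */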
 297
 298/* Various constants */
 299
 300/* Coalescing */
 301#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 302#define MVNETA_RX_COAL_PKTS		32
 303#define MVNETA_RX_COAL_USEC		100
 304
  305/* The two-byte Marvell header. It either contains a special value
  306 * used by Marvell switches when a specific hardware mode is enabled
  307 * (not supported by this driver) or is automatically filled with
  308 * zeroes on the RX side. Because those two bytes sit at the front of
  309 * the Ethernet header, they automatically align the IP header on a
  310 * 4-byte boundary: the hardware skips those two bytes on its
  311 * own.
  312 */
 313#define MVNETA_MH_SIZE			2
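/* A concrete illustration of the alignment described above: with the 2-byte
 * Marvell header in front of the 14-byte Ethernet header, the IP header
 * starts at offset 2 + 14 = 16 into the buffer, i.e. on a 4-byte boundary.
 */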
 314
 315#define MVNETA_VLAN_TAG_LEN             4
 316
 317#define MVNETA_TX_CSUM_DEF_SIZE		1600
 318#define MVNETA_TX_CSUM_MAX_SIZE		9800
 319#define MVNETA_ACC_MODE_EXT1		1
 320#define MVNETA_ACC_MODE_EXT2		2
 321
 322#define MVNETA_MAX_DECODE_WIN		6
 323
 324/* Timeout constants */
 325#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
 326#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
 327#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
 328
 329#define MVNETA_TX_MTU_MAX		0x3ffff
 330
 331/* The RSS lookup table actually has 256 entries but we do not use
 332 * them yet
 333 */
 334#define MVNETA_RSS_LU_TABLE_SIZE	1
 335
 336/* Max number of Rx descriptors */
 337#define MVNETA_MAX_RXD 512
 338
 339/* Max number of Tx descriptors */
 340#define MVNETA_MAX_TXD 1024
 341
 342/* Max number of allowed TCP segments for software TSO */
 343#define MVNETA_MAX_TSO_SEGS 100
 344
 345#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 346
 347/* The size of a TSO header page */
 348#define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE)
 349
 350/* Number of TSO headers per page. This should be a power of 2 */
 351#define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)
 352
 353/* Maximum number of TSO header pages */
 354#define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)
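/* Rough arithmetic for the TSO header bookkeeping above, assuming 4 KiB pages
 * and a 256-byte TSO_HEADER_SIZE (both values are platform/kernel dependent,
 * so treat this only as an illustration):
 *
 *	MVNETA_TSO_PAGE_SIZE = 2 * 4096   = 8192 bytes
 *	MVNETA_TSO_PER_PAGE  = 8192 / 256 = 32 headers per page
 *	MVNETA_MAX_TSO_PAGES = 1024 / 32  = 32 pages
 */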
 355
 356/* descriptor aligned size */
 357#define MVNETA_DESC_ALIGNED_SIZE	32
 358
 359/* Number of bytes to be taken into account by HW when putting incoming data
 360 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 361 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 362 */
 363#define MVNETA_RX_PKT_OFFSET_CORRECTION		64
 364
 365#define MVNETA_RX_PKT_SIZE(mtu) \
 366	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 367	      ETH_HLEN + ETH_FCS_LEN,			     \
 368	      cache_line_size())
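/* For example, with a 1500-byte MTU and a 64-byte cache line (the cache line
 * size is platform dependent, so this is only an illustration):
 *
 *	1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS) = 1524 bytes,
 *	rounded up to the cache line size -> 1536 bytes.
 */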
 369
 370/* Driver assumes that the last 3 bits are 0 */
 371#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
 372#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
 373			 MVNETA_SKB_HEADROOM))
 374#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
 375
 376#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 377	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 378
 379enum {
 380	ETHTOOL_STAT_EEE_WAKEUP,
 381	ETHTOOL_STAT_SKB_ALLOC_ERR,
 382	ETHTOOL_STAT_REFILL_ERR,
 383	ETHTOOL_XDP_REDIRECT,
 384	ETHTOOL_XDP_PASS,
 385	ETHTOOL_XDP_DROP,
 386	ETHTOOL_XDP_TX,
 387	ETHTOOL_XDP_TX_ERR,
 388	ETHTOOL_XDP_XMIT,
 389	ETHTOOL_XDP_XMIT_ERR,
 390	ETHTOOL_MAX_STATS,
 391};
 392
 393struct mvneta_statistic {
 394	unsigned short offset;
 395	unsigned short type;
 396	const char name[ETH_GSTRING_LEN];
 397};
 398
 399#define T_REG_32	32
 400#define T_REG_64	64
 401#define T_SW		1
 402
 403#define MVNETA_XDP_PASS		0
 404#define MVNETA_XDP_DROPPED	BIT(0)
 405#define MVNETA_XDP_TX		BIT(1)
 406#define MVNETA_XDP_REDIR	BIT(2)
 407
 408static const struct mvneta_statistic mvneta_statistics[] = {
 409	{ 0x3000, T_REG_64, "good_octets_received", },
 410	{ 0x3010, T_REG_32, "good_frames_received", },
 411	{ 0x3008, T_REG_32, "bad_octets_received", },
 412	{ 0x3014, T_REG_32, "bad_frames_received", },
 413	{ 0x3018, T_REG_32, "broadcast_frames_received", },
 414	{ 0x301c, T_REG_32, "multicast_frames_received", },
 415	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
 416	{ 0x3058, T_REG_32, "good_fc_received", },
 417	{ 0x305c, T_REG_32, "bad_fc_received", },
 418	{ 0x3060, T_REG_32, "undersize_received", },
 419	{ 0x3064, T_REG_32, "fragments_received", },
 420	{ 0x3068, T_REG_32, "oversize_received", },
 421	{ 0x306c, T_REG_32, "jabber_received", },
 422	{ 0x3070, T_REG_32, "mac_receive_error", },
 423	{ 0x3074, T_REG_32, "bad_crc_event", },
 424	{ 0x3078, T_REG_32, "collision", },
 425	{ 0x307c, T_REG_32, "late_collision", },
 426	{ 0x2484, T_REG_32, "rx_discard", },
 427	{ 0x2488, T_REG_32, "rx_overrun", },
 428	{ 0x3020, T_REG_32, "frames_64_octets", },
 429	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
 430	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
 431	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
 432	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
 433	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
 434	{ 0x3038, T_REG_64, "good_octets_sent", },
 435	{ 0x3040, T_REG_32, "good_frames_sent", },
 436	{ 0x3044, T_REG_32, "excessive_collision", },
 437	{ 0x3048, T_REG_32, "multicast_frames_sent", },
 438	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
 439	{ 0x3054, T_REG_32, "fc_sent", },
 440	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
 441	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 442	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 443	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 444	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
 445	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
 446	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
 447	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
 448	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
 449	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
 450	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
 451};
 452
 453struct mvneta_stats {
 454	u64	rx_packets;
 455	u64	rx_bytes;
 456	u64	tx_packets;
 457	u64	tx_bytes;
 458	/* xdp */
 459	u64	xdp_redirect;
 460	u64	xdp_pass;
 461	u64	xdp_drop;
 462	u64	xdp_xmit;
 463	u64	xdp_xmit_err;
 464	u64	xdp_tx;
 465	u64	xdp_tx_err;
 466};
 467
 468struct mvneta_ethtool_stats {
 469	struct mvneta_stats ps;
 470	u64	skb_alloc_error;
 471	u64	refill_error;
 472};
 473
 474struct mvneta_pcpu_stats {
 475	struct u64_stats_sync syncp;
 476
 477	struct mvneta_ethtool_stats es;
 478	u64	rx_dropped;
 479	u64	rx_errors;
 480};
 481
 482struct mvneta_pcpu_port {
 483	/* Pointer to the shared port */
 484	struct mvneta_port	*pp;
 485
 486	/* Pointer to the CPU-local NAPI struct */
 487	struct napi_struct	napi;
 488
 489	/* Cause of the previous interrupt */
 490	u32			cause_rx_tx;
 491};
 492
 493enum {
 494	__MVNETA_DOWN,
 495};
 496
 497struct mvneta_port {
 498	u8 id;
 499	struct mvneta_pcpu_port __percpu	*ports;
 500	struct mvneta_pcpu_stats __percpu	*stats;
 501
 502	unsigned long state;
 503
 504	int pkt_size;
 505	void __iomem *base;
 506	struct mvneta_rx_queue *rxqs;
 507	struct mvneta_tx_queue *txqs;
 508	struct net_device *dev;
 509	struct hlist_node node_online;
 510	struct hlist_node node_dead;
 511	int rxq_def;
 512	/* Protect the access to the percpu interrupt registers,
 513	 * ensuring that the configuration remains coherent.
 514	 */
 515	spinlock_t lock;
 516	bool is_stopped;
 517
 518	u32 cause_rx_tx;
 519	struct napi_struct napi;
 520
 521	struct bpf_prog *xdp_prog;
 522
 523	/* Core clock */
 524	struct clk *clk;
 525	/* AXI clock */
 526	struct clk *clk_bus;
 527	u8 mcast_count[256];
 528	u16 tx_ring_size;
 529	u16 rx_ring_size;
 530
 531	phy_interface_t phy_interface;
 532	struct device_node *dn;
 533	unsigned int tx_csum_limit;
 534	struct phylink *phylink;
 535	struct phylink_config phylink_config;
 536	struct phylink_pcs phylink_pcs;
 537	struct phy *comphy;
 538
 539	struct mvneta_bm *bm_priv;
 540	struct mvneta_bm_pool *pool_long;
 541	struct mvneta_bm_pool *pool_short;
 542	int bm_win_id;
 543
 544	bool eee_enabled;
 545	bool eee_active;
 546	bool tx_lpi_enabled;
 547
 548	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 549
 550	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 551
 552	/* Flags for special SoC configurations */
 553	bool neta_armada3700;
 554	bool neta_ac5;
 555	u16 rx_offset_correction;
 556	const struct mbus_dram_target_info *dram_target_info;
 557};
 558
 559/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
  560 * layout of the transmit and receive DMA descriptors; their
  561 * layout is therefore defined by the hardware design.
 562 */
 563
 564#define MVNETA_TX_L3_OFF_SHIFT	0
 565#define MVNETA_TX_IP_HLEN_SHIFT	8
 566#define MVNETA_TX_L4_UDP	BIT(16)
 567#define MVNETA_TX_L3_IP6	BIT(17)
 568#define MVNETA_TXD_IP_CSUM	BIT(18)
 569#define MVNETA_TXD_Z_PAD	BIT(19)
 570#define MVNETA_TXD_L_DESC	BIT(20)
 571#define MVNETA_TXD_F_DESC	BIT(21)
 572#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
 573				 MVNETA_TXD_L_DESC | \
 574				 MVNETA_TXD_F_DESC)
 575#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
 576#define MVNETA_TX_L4_CSUM_NOT	BIT(31)
 577
 578#define MVNETA_RXD_ERR_CRC		0x0
 579#define MVNETA_RXD_BM_POOL_SHIFT	13
 580#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
 581#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
 582#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
 583#define MVNETA_RXD_ERR_LEN		BIT(18)
 584#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
 585#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
 586#define MVNETA_RXD_L3_IP4		BIT(25)
 587#define MVNETA_RXD_LAST_DESC		BIT(26)
 588#define MVNETA_RXD_FIRST_DESC		BIT(27)
 589#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
 590					 MVNETA_RXD_LAST_DESC)
 591#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
 592
 593#if defined(__LITTLE_ENDIAN)
 594struct mvneta_tx_desc {
 595	u32  command;		/* Options used by HW for packet transmitting.*/
 596	u16  reserved1;		/* csum_l4 (for future use)		*/
 597	u16  data_size;		/* Data size of transmitted packet in bytes */
 598	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
 599	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
 600	u32  reserved3[4];	/* Reserved - (for future use)		*/
 601};
 602
 603struct mvneta_rx_desc {
 604	u32  status;		/* Info about received packet		*/
 605	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
 606	u16  data_size;		/* Size of received packet in bytes	*/
 607
 608	u32  buf_phys_addr;	/* Physical address of the buffer	*/
 609	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
 610
 611	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
 612	u16  reserved3;		/* prefetch_cmd, for future use		*/
 613	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
 614
 615	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
 616	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
 617};
 618#else
 619struct mvneta_tx_desc {
 620	u16  data_size;		/* Data size of transmitted packet in bytes */
 621	u16  reserved1;		/* csum_l4 (for future use)		*/
 622	u32  command;		/* Options used by HW for packet transmitting.*/
 623	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
 624	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
 625	u32  reserved3[4];	/* Reserved - (for future use)		*/
 626};
 627
 628struct mvneta_rx_desc {
 629	u16  data_size;		/* Size of received packet in bytes	*/
 630	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
 631	u32  status;		/* Info about received packet		*/
 632
 633	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
 634	u32  buf_phys_addr;	/* Physical address of the buffer	*/
 635
 636	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
 637	u16  reserved3;		/* prefetch_cmd, for future use		*/
 638	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
 639
 640	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
 641	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
 642};
 643#endif
 644
 645enum mvneta_tx_buf_type {
 646	MVNETA_TYPE_TSO,
 647	MVNETA_TYPE_SKB,
 648	MVNETA_TYPE_XDP_TX,
 649	MVNETA_TYPE_XDP_NDO,
 650};
 651
 652struct mvneta_tx_buf {
 653	enum mvneta_tx_buf_type type;
 654	union {
 655		struct xdp_frame *xdpf;
 656		struct sk_buff *skb;
 657	};
 658};
 659
 660struct mvneta_tx_queue {
 661	/* Number of this TX queue, in the range 0-7 */
 662	u8 id;
 663
 664	/* Number of TX DMA descriptors in the descriptor ring */
 665	int size;
 666
 667	/* Number of currently used TX DMA descriptor in the
 668	 * descriptor ring
 669	 */
 670	int count;
 671	int pending;
 672	int tx_stop_threshold;
 673	int tx_wake_threshold;
 674
 675	/* Array of transmitted buffers */
 676	struct mvneta_tx_buf *buf;
 677
 678	/* Index of last TX DMA descriptor that was inserted */
 679	int txq_put_index;
 680
 681	/* Index of the TX DMA descriptor to be cleaned up */
 682	int txq_get_index;
 683
 684	u32 done_pkts_coal;
 685
 686	/* Virtual address of the TX DMA descriptors array */
 687	struct mvneta_tx_desc *descs;
 688
 689	/* DMA address of the TX DMA descriptors array */
 690	dma_addr_t descs_phys;
 691
 692	/* Index of the last TX DMA descriptor */
 693	int last_desc;
 694
 695	/* Index of the next TX DMA descriptor to process */
 696	int next_desc_to_proc;
 697
 698	/* DMA buffers for TSO headers */
 699	char *tso_hdrs[MVNETA_MAX_TSO_PAGES];
 700
 701	/* DMA address of TSO headers */
 702	dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES];
 703
 704	/* Affinity mask for CPUs*/
 705	cpumask_t affinity_mask;
 706};
 707
 708struct mvneta_rx_queue {
 709	/* rx queue number, in the range 0-7 */
 710	u8 id;
 711
 712	/* num of rx descriptors in the rx descriptor ring */
 713	int size;
 714
 715	u32 pkts_coal;
 716	u32 time_coal;
 717
 718	/* page_pool */
 719	struct page_pool *page_pool;
 720	struct xdp_rxq_info xdp_rxq;
 721
 722	/* Virtual address of the RX buffer */
 723	void  **buf_virt_addr;
 724
 725	/* Virtual address of the RX DMA descriptors array */
 726	struct mvneta_rx_desc *descs;
 727
 728	/* DMA address of the RX DMA descriptors array */
 729	dma_addr_t descs_phys;
 730
 731	/* Index of the last RX DMA descriptor */
 732	int last_desc;
 733
 734	/* Index of the next RX DMA descriptor to process */
 735	int next_desc_to_proc;
 736
 737	/* Index of first RX DMA descriptor to refill */
 738	int first_to_refill;
 739	u32 refill_num;
 740};
 741
 742static enum cpuhp_state online_hpstate;
  743/* The hardware supports eight (8) RX and eight (8) TX queues; the driver
  744 * allocates all of them by default, as the initializers below show.
  745 */
 746static int rxq_number = 8;
 747static int txq_number = 8;
 748
 749static int rxq_def;
 750
 751static int rx_copybreak __read_mostly = 256;
 752
  753/* HW BM requires that each port be identified by a unique ID */
 754static int global_port_id;
 755
 756#define MVNETA_DRIVER_NAME "mvneta"
 757#define MVNETA_DRIVER_VERSION "1.0"
 758
 759/* Utility/helper methods */
 760
 761/* Write helper method */
 762static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
 763{
 764	writel(data, pp->base + offset);
 765}
 766
 767/* Read helper method */
 768static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
 769{
 770	return readl(pp->base + offset);
 771}
 772
 773/* Increment txq get counter */
 774static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
 775{
 776	txq->txq_get_index++;
 777	if (txq->txq_get_index == txq->size)
 778		txq->txq_get_index = 0;
 779}
 780
 781/* Increment txq put counter */
 782static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
 783{
 784	txq->txq_put_index++;
 785	if (txq->txq_put_index == txq->size)
 786		txq->txq_put_index = 0;
 787}
 788
 789
 790/* Clear all MIB counters */
 791static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 792{
 793	int i;
 794
 795	/* Perform dummy reads from MIB counters */
 796	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 797		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 798	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
 799	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 800}
 801
 802/* Get System Network Statistics */
 803static void
 804mvneta_get_stats64(struct net_device *dev,
 805		   struct rtnl_link_stats64 *stats)
 806{
 807	struct mvneta_port *pp = netdev_priv(dev);
 808	unsigned int start;
 809	int cpu;
 810
 811	for_each_possible_cpu(cpu) {
 812		struct mvneta_pcpu_stats *cpu_stats;
 813		u64 rx_packets;
 814		u64 rx_bytes;
 815		u64 rx_dropped;
 816		u64 rx_errors;
 817		u64 tx_packets;
 818		u64 tx_bytes;
 819
 820		cpu_stats = per_cpu_ptr(pp->stats, cpu);
 821		do {
 822			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 823			rx_packets = cpu_stats->es.ps.rx_packets;
 824			rx_bytes   = cpu_stats->es.ps.rx_bytes;
 825			rx_dropped = cpu_stats->rx_dropped;
 826			rx_errors  = cpu_stats->rx_errors;
 827			tx_packets = cpu_stats->es.ps.tx_packets;
 828			tx_bytes   = cpu_stats->es.ps.tx_bytes;
 829		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
 830
 831		stats->rx_packets += rx_packets;
 832		stats->rx_bytes   += rx_bytes;
 833		stats->rx_dropped += rx_dropped;
 834		stats->rx_errors  += rx_errors;
 835		stats->tx_packets += tx_packets;
 836		stats->tx_bytes   += tx_bytes;
 837	}
 838
 839	stats->tx_dropped	= dev->stats.tx_dropped;
 840}
 841
 842/* Rx descriptors helper methods */
 843
  844/* Check whether the RX descriptor with this status is both the first
  845 * and the last descriptor for the RX packet. Each RX packet is currently
  846 * received through a single RX descriptor, so an RX descriptor that does
  847 * not have both its first and last bits set is an error.
 848 */
 849static int mvneta_rxq_desc_is_first_last(u32 status)
 850{
 851	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 852		MVNETA_RXD_FIRST_LAST_DESC;
 853}
 854
 855/* Add number of descriptors ready to receive new packets */
 856static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 857					  struct mvneta_rx_queue *rxq,
 858					  int ndescs)
 859{
 860	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 861	 * be added at once
 862	 */
 863	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 864		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 865			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 866			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 867		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 868	}
 869
 870	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 871		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 872}
 873
 874/* Get number of RX descriptors occupied by received packets */
 875static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 876					struct mvneta_rx_queue *rxq)
 877{
 878	u32 val;
 879
 880	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 881	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 882}
 883
 884/* Update num of rx desc called upon return from rx path or
 885 * from mvneta_rxq_drop_pkts().
 886 */
 887static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 888				       struct mvneta_rx_queue *rxq,
 889				       int rx_done, int rx_filled)
 890{
 891	u32 val;
 892
 893	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 894		val = rx_done |
 895		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 896		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 897		return;
 898	}
 899
 900	/* Only 255 descriptors can be added at once */
 901	while ((rx_done > 0) || (rx_filled > 0)) {
 902		if (rx_done <= 0xff) {
 903			val = rx_done;
 904			rx_done = 0;
 905		} else {
 906			val = 0xff;
 907			rx_done -= 0xff;
 908		}
 909		if (rx_filled <= 0xff) {
 910			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 911			rx_filled = 0;
 912		} else {
 913			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 914			rx_filled -= 0xff;
 915		}
 916		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 917	}
 918}
 919
 920/* Get pointer to next RX descriptor to be processed by SW */
 921static struct mvneta_rx_desc *
 922mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 923{
 924	int rx_desc = rxq->next_desc_to_proc;
 925
 926	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 927	prefetch(rxq->descs + rxq->next_desc_to_proc);
 928	return rxq->descs + rx_desc;
 929}
 930
 931/* Change maximum receive size of the port. */
 932static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
 933{
 934	u32 val;
 935
 936	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 937	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
 938	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
 939		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
 940	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 941}
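/* As an example of the encoding used above (an illustrative value, not one the
 * driver hard-codes): limiting frames to 1518 bytes would program
 * (1518 - MVNETA_MH_SIZE) / 2 = 758 into the MAX_RX_SIZE field.
 */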
 942
 943
 944/* Set rx queue offset */
 945static void mvneta_rxq_offset_set(struct mvneta_port *pp,
 946				  struct mvneta_rx_queue *rxq,
 947				  int offset)
 948{
 949	u32 val;
 950
 951	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 952	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
 953
  954	/* Offset is programmed in units of 8 bytes (hence the >> 3) */
 955	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
 956	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 957}
 958
 959
 960/* Tx descriptors helper methods */
 961
 962/* Update HW with number of TX descriptors to be sent */
 963static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 964				     struct mvneta_tx_queue *txq,
 965				     int pend_desc)
 966{
 967	u32 val;
 968
 969	pend_desc += txq->pending;
 970
 971	/* Only 255 Tx descriptors can be added at once */
 972	do {
 973		val = min(pend_desc, 255);
 974		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 975		pend_desc -= val;
 976	} while (pend_desc > 0);
 977	txq->pending = 0;
 978}
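/* For example, asking the hardware to send 300 pending descriptors results in
 * two writes to MVNETA_TXQ_UPDATE_REG(): first 255, then the remaining 45.
 */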
 979
  980/* Get pointer to next TX descriptor to be processed (sent) by HW */
 981static struct mvneta_tx_desc *
 982mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 983{
 984	int tx_desc = txq->next_desc_to_proc;
 985
 986	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 987	return txq->descs + tx_desc;
 988}
 989
 990/* Release the last allocated TX descriptor. Useful to handle DMA
 991 * mapping failures in the TX path.
 992 */
 993static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 994{
 995	if (txq->next_desc_to_proc == 0)
 996		txq->next_desc_to_proc = txq->last_desc - 1;
 997	else
 998		txq->next_desc_to_proc--;
 999}
1000
1001/* Set rxq buf size */
1002static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
1003				    struct mvneta_rx_queue *rxq,
1004				    int buf_size)
1005{
1006	u32 val;
1007
1008	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
1009
1010	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
1011	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
1012
1013	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1014}
1015
1016/* Disable buffer management (BM) */
1017static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
1018				  struct mvneta_rx_queue *rxq)
1019{
1020	u32 val;
1021
1022	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1023	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
1024	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1025}
1026
1027/* Enable buffer management (BM) */
1028static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
1029				 struct mvneta_rx_queue *rxq)
1030{
1031	u32 val;
1032
1033	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1034	val |= MVNETA_RXQ_HW_BUF_ALLOC;
1035	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1036}
1037
1038/* Notify HW about port's assignment of pool for bigger packets */
1039static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1040				     struct mvneta_rx_queue *rxq)
1041{
1042	u32 val;
1043
1044	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1045	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
1046	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1047
1048	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1049}
1050
1051/* Notify HW about port's assignment of pool for smaller packets */
1052static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1053				      struct mvneta_rx_queue *rxq)
1054{
1055	u32 val;
1056
1057	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1058	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
1059	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1060
1061	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1062}
1063
1064/* Set port's receive buffer size for assigned BM pool */
1065static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1066					      int buf_size,
1067					      u8 pool_id)
1068{
1069	u32 val;
1070
1071	if (!IS_ALIGNED(buf_size, 8)) {
1072		dev_warn(pp->dev->dev.parent,
1073			 "illegal buf_size value %d, round to %d\n",
1074			 buf_size, ALIGN(buf_size, 8));
1075		buf_size = ALIGN(buf_size, 8);
1076	}
1077
1078	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1079	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
1080	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1081}
1082
 1083/* Configure MBUS window in order to enable access to the BM internal SRAM */
1084static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1085				  u8 target, u8 attr)
1086{
1087	u32 win_enable, win_protect;
1088	int i;
1089
1090	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1091
1092	if (pp->bm_win_id < 0) {
1093		/* Find first not occupied window */
1094		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1095			if (win_enable & (1 << i)) {
1096				pp->bm_win_id = i;
1097				break;
1098			}
1099		}
1100		if (i == MVNETA_MAX_DECODE_WIN)
1101			return -ENOMEM;
1102	} else {
1103		i = pp->bm_win_id;
1104	}
1105
1106	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1107	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1108
1109	if (i < 4)
1110		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1111
1112	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1113		    (attr << 8) | target);
1114
1115	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1116
1117	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1118	win_protect |= 3 << (2 * i);
1119	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1120
1121	win_enable &= ~(1 << i);
1122	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1123
1124	return 0;
1125}
1126
1127static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1128{
1129	u32 wsize;
1130	u8 target, attr;
1131	int err;
1132
1133	/* Get BM window information */
1134	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1135					 &target, &attr);
1136	if (err < 0)
1137		return err;
1138
1139	pp->bm_win_id = -1;
1140
1141	/* Open NETA -> BM window */
1142	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1143				     target, attr);
1144	if (err < 0) {
1145		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1146		return err;
1147	}
1148	return 0;
1149}
1150
 1151/* Assign and initialize pools for the port. In case of failure, the
 1152 * buffer manager will remain disabled for the current port.
1153 */
1154static int mvneta_bm_port_init(struct platform_device *pdev,
1155			       struct mvneta_port *pp)
1156{
1157	struct device_node *dn = pdev->dev.of_node;
1158	u32 long_pool_id, short_pool_id;
1159
1160	if (!pp->neta_armada3700) {
1161		int ret;
1162
1163		ret = mvneta_bm_port_mbus_init(pp);
1164		if (ret)
1165			return ret;
1166	}
1167
1168	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1169		netdev_info(pp->dev, "missing long pool id\n");
1170		return -EINVAL;
1171	}
1172
1173	/* Create port's long pool depending on mtu */
1174	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1175					   MVNETA_BM_LONG, pp->id,
1176					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1177	if (!pp->pool_long) {
1178		netdev_info(pp->dev, "fail to obtain long pool for port\n");
1179		return -ENOMEM;
1180	}
1181
1182	pp->pool_long->port_map |= 1 << pp->id;
1183
1184	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1185				   pp->pool_long->id);
1186
1187	/* If short pool id is not defined, assume using single pool */
1188	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1189		short_pool_id = long_pool_id;
1190
1191	/* Create port's short pool */
1192	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1193					    MVNETA_BM_SHORT, pp->id,
1194					    MVNETA_BM_SHORT_PKT_SIZE);
1195	if (!pp->pool_short) {
1196		netdev_info(pp->dev, "fail to obtain short pool for port\n");
1197		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1198		return -ENOMEM;
1199	}
1200
1201	if (short_pool_id != long_pool_id) {
1202		pp->pool_short->port_map |= 1 << pp->id;
1203		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1204					   pp->pool_short->id);
1205	}
1206
1207	return 0;
1208}
1209
1210/* Update settings of a pool for bigger packets */
1211static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1212{
1213	struct mvneta_bm_pool *bm_pool = pp->pool_long;
1214	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1215	int num;
1216
1217	/* Release all buffers from long pool */
1218	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1219	if (hwbm_pool->buf_num) {
1220		WARN(1, "cannot free all buffers in pool %d\n",
1221		     bm_pool->id);
1222		goto bm_mtu_err;
1223	}
1224
1225	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1226	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1227	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1228			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1229
1230	/* Fill entire long pool */
1231	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1232	if (num != hwbm_pool->size) {
1233		WARN(1, "pool %d: %d of %d allocated\n",
1234		     bm_pool->id, num, hwbm_pool->size);
1235		goto bm_mtu_err;
1236	}
1237	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1238
1239	return;
1240
1241bm_mtu_err:
1242	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1243	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1244
1245	pp->bm_priv = NULL;
1246	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1247	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1248	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1249}
1250
1251/* Start the Ethernet port RX and TX activity */
1252static void mvneta_port_up(struct mvneta_port *pp)
1253{
1254	int queue;
1255	u32 q_map;
1256
 1257	/* Enable all initialized TXQs. */
1258	q_map = 0;
1259	for (queue = 0; queue < txq_number; queue++) {
1260		struct mvneta_tx_queue *txq = &pp->txqs[queue];
1261		if (txq->descs)
1262			q_map |= (1 << queue);
1263	}
1264	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1265
1266	q_map = 0;
1267	/* Enable all initialized RXQs. */
1268	for (queue = 0; queue < rxq_number; queue++) {
1269		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1270
1271		if (rxq->descs)
1272			q_map |= (1 << queue);
1273	}
1274	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1275}
1276
1277/* Stop the Ethernet port activity */
1278static void mvneta_port_down(struct mvneta_port *pp)
1279{
1280	u32 val;
1281	int count;
1282
1283	/* Stop Rx port activity. Check port Rx activity. */
1284	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1285
1286	/* Issue stop command for active channels only */
1287	if (val != 0)
1288		mvreg_write(pp, MVNETA_RXQ_CMD,
1289			    val << MVNETA_RXQ_DISABLE_SHIFT);
1290
1291	/* Wait for all Rx activity to terminate. */
1292	count = 0;
1293	do {
1294		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1295			netdev_warn(pp->dev,
1296				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1297				    val);
1298			break;
1299		}
1300		mdelay(1);
1301
1302		val = mvreg_read(pp, MVNETA_RXQ_CMD);
1303	} while (val & MVNETA_RXQ_ENABLE_MASK);
1304
1305	/* Stop Tx port activity. Check port Tx activity. Issue stop
1306	 * command for active channels only
1307	 */
1308	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1309
1310	if (val != 0)
1311		mvreg_write(pp, MVNETA_TXQ_CMD,
1312			    (val << MVNETA_TXQ_DISABLE_SHIFT));
1313
1314	/* Wait for all Tx activity to terminate. */
1315	count = 0;
1316	do {
1317		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1318			netdev_warn(pp->dev,
1319				    "TIMEOUT for TX stopped status=0x%08x\n",
1320				    val);
1321			break;
1322		}
1323		mdelay(1);
1324
1325		/* Check TX Command reg that all Txqs are stopped */
1326		val = mvreg_read(pp, MVNETA_TXQ_CMD);
1327
1328	} while (val & MVNETA_TXQ_ENABLE_MASK);
1329
1330	/* Double check to verify that TX FIFO is empty */
1331	count = 0;
1332	do {
1333		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1334			netdev_warn(pp->dev,
1335				    "TX FIFO empty timeout status=0x%08x\n",
1336				    val);
1337			break;
1338		}
1339		mdelay(1);
1340
1341		val = mvreg_read(pp, MVNETA_PORT_STATUS);
1342	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1343		 (val & MVNETA_TX_IN_PRGRS));
1344
1345	udelay(200);
1346}
1347
1348/* Enable the port by setting the port enable bit of the MAC control register */
1349static void mvneta_port_enable(struct mvneta_port *pp)
1350{
1351	u32 val;
1352
1353	/* Enable port */
1354	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1355	val |= MVNETA_GMAC0_PORT_ENABLE;
1356	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1357}
1358
 1359/* Disable the port and wait for about 200 usec before returning */
1360static void mvneta_port_disable(struct mvneta_port *pp)
1361{
1362	u32 val;
1363
1364	/* Reset the Enable bit in the Serial Control Register */
1365	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1366	val &= ~MVNETA_GMAC0_PORT_ENABLE;
1367	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1368
1369	udelay(200);
1370}
1371
1372/* Multicast tables methods */
1373
1374/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1375static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1376{
1377	int offset;
1378	u32 val;
1379
1380	if (queue == -1) {
1381		val = 0;
1382	} else {
1383		val = 0x1 | (queue << 1);
1384		val |= (val << 24) | (val << 16) | (val << 8);
1385	}
1386
1387	for (offset = 0; offset <= 0xc; offset += 4)
1388		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1389}
1390
1391/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1392static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1393{
1394	int offset;
1395	u32 val;
1396
1397	if (queue == -1) {
1398		val = 0;
1399	} else {
1400		val = 0x1 | (queue << 1);
1401		val |= (val << 24) | (val << 16) | (val << 8);
1402	}
1403
1404	for (offset = 0; offset <= 0xfc; offset += 4)
1405		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1406
1407}
1408
1409/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1410static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1411{
1412	int offset;
1413	u32 val;
1414
1415	if (queue == -1) {
1416		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1417		val = 0;
1418	} else {
1419		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1420		val = 0x1 | (queue << 1);
1421		val |= (val << 24) | (val << 16) | (val << 8);
1422	}
1423
1424	for (offset = 0; offset <= 0xfc; offset += 4)
1425		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1426}
1427
1428static void mvneta_percpu_unmask_interrupt(void *arg)
1429{
1430	struct mvneta_port *pp = arg;
1431
 1432	/* All the queues are unmasked, but actually only the ones
1433	 * mapped to this CPU will be unmasked
1434	 */
1435	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1436		    MVNETA_RX_INTR_MASK_ALL |
1437		    MVNETA_TX_INTR_MASK_ALL |
1438		    MVNETA_MISCINTR_INTR_MASK);
1439}
1440
1441static void mvneta_percpu_mask_interrupt(void *arg)
1442{
1443	struct mvneta_port *pp = arg;
1444
 1445	/* All the queues are masked, but actually only the ones
1446	 * mapped to this CPU will be masked
1447	 */
1448	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1449	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1450	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1451}
1452
1453static void mvneta_percpu_clear_intr_cause(void *arg)
1454{
1455	struct mvneta_port *pp = arg;
1456
 1457	/* All the queues are cleared, but actually only the ones
1458	 * mapped to this CPU will be cleared
1459	 */
1460	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1461	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1462	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1463}
1464
1465/* This method sets defaults to the NETA port:
1466 *	Clears interrupt Cause and Mask registers.
1467 *	Clears all MAC tables.
1468 *	Sets defaults to all registers.
1469 *	Resets RX and TX descriptor rings.
1470 *	Resets PHY.
1471 * This method can be called after mvneta_port_down() to return the port
1472 *	settings to defaults.
1473 */
1474static void mvneta_defaults_set(struct mvneta_port *pp)
1475{
1476	int cpu;
1477	int queue;
1478	u32 val;
1479	int max_cpu = num_present_cpus();
1480
1481	/* Clear all Cause registers */
1482	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1483
1484	/* Mask all interrupts */
1485	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1486	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1487
1488	/* Enable MBUS Retry bit16 */
1489	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1490
1491	/* Set CPU queue access map. CPUs are assigned to the RX and
1492	 * TX queues modulo their number. If there is only one TX
 1493	 * queue then it is assigned to the CPU associated with the
1494	 * default RX queue.
1495	 */
1496	for_each_present_cpu(cpu) {
1497		int rxq_map = 0, txq_map = 0;
1498		int rxq, txq;
1499		if (!pp->neta_armada3700) {
1500			for (rxq = 0; rxq < rxq_number; rxq++)
1501				if ((rxq % max_cpu) == cpu)
1502					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1503
1504			for (txq = 0; txq < txq_number; txq++)
1505				if ((txq % max_cpu) == cpu)
1506					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1507
1508			/* With only one TX queue we configure a special case
 1509			 * which allows getting all the IRQs on a single
1510			 * CPU
1511			 */
1512			if (txq_number == 1)
1513				txq_map = (cpu == pp->rxq_def) ?
1514					MVNETA_CPU_TXQ_ACCESS(0) : 0;
1515
1516		} else {
1517			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1518			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1519		}
1520
1521		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1522	}
1523
1524	/* Reset RX and TX DMAs */
1525	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1526	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1527
1528	/* Disable Legacy WRR, Disable EJP, Release from reset */
1529	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1530	for (queue = 0; queue < txq_number; queue++) {
1531		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1532		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1533	}
1534
1535	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1536	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1537
1538	/* Set Port Acceleration Mode */
1539	if (pp->bm_priv)
1540		/* HW buffer management + legacy parser */
1541		val = MVNETA_ACC_MODE_EXT2;
1542	else
1543		/* SW buffer management + legacy parser */
1544		val = MVNETA_ACC_MODE_EXT1;
1545	mvreg_write(pp, MVNETA_ACC_MODE, val);
1546
1547	if (pp->bm_priv)
1548		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1549
 1550	/* Update val of portCfg register according to all RxQueue types */
1551	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1552	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1553
1554	val = 0;
1555	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1556	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1557
1558	/* Build PORT_SDMA_CONFIG_REG */
1559	val = 0;
1560
1561	/* Default burst size */
1562	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1563	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1564	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1565
1566#if defined(__BIG_ENDIAN)
1567	val |= MVNETA_DESC_SWAP;
1568#endif
1569
1570	/* Assign port SDMA configuration */
1571	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1572
1573	/* Disable PHY polling in hardware, since we're using the
1574	 * kernel phylib to do this.
1575	 */
1576	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1577	val &= ~MVNETA_PHY_POLLING_ENABLE;
1578	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1579
1580	mvneta_set_ucast_table(pp, -1);
1581	mvneta_set_special_mcast_table(pp, -1);
1582	mvneta_set_other_mcast_table(pp, -1);
1583
1584	/* Set port interrupt enable register - default enable all */
1585	mvreg_write(pp, MVNETA_INTR_ENABLE,
1586		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1587		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1588
1589	mvneta_mib_counters_clear(pp);
1590}
1591
1592/* Set max sizes for tx queues */
1593static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1594
1595{
1596	u32 val, size, mtu;
1597	int queue;
1598
1599	mtu = max_tx_size * 8;
1600	if (mtu > MVNETA_TX_MTU_MAX)
1601		mtu = MVNETA_TX_MTU_MAX;
1602
1603	/* Set MTU */
1604	val = mvreg_read(pp, MVNETA_TX_MTU);
1605	val &= ~MVNETA_TX_MTU_MAX;
1606	val |= mtu;
1607	mvreg_write(pp, MVNETA_TX_MTU, val);
1608
 1609	/* TX token size and all TXQs token size must be larger than MTU */
1610	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1611
1612	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1613	if (size < mtu) {
1614		size = mtu;
1615		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1616		val |= size;
1617		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1618	}
1619	for (queue = 0; queue < txq_number; queue++) {
1620		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1621
1622		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1623		if (size < mtu) {
1624			size = mtu;
1625			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1626			val |= size;
1627			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1628		}
1629	}
1630}
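
/* Illustration (editor's addition, not from the hw spec): with
 * max_tx_size = 1518 bytes, the value programmed into MVNETA_TX_MTU is
 * 1518 * 8 = 12144 (clamped to MVNETA_TX_MTU_MAX if larger), and the
 * global and per-TXQ token sizes are then raised to at least that
 * value by the loops above.
 */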
1631
1632/* Set unicast address */
1633static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1634				  int queue)
1635{
1636	unsigned int unicast_reg;
1637	unsigned int tbl_offset;
1638	unsigned int reg_offset;
1639
1640	/* Locate the Unicast table entry */
1641	last_nibble = (0xf & last_nibble);
1642
1643	/* offset from unicast tbl base */
1644	tbl_offset = (last_nibble / 4) * 4;
1645
1646	/* offset within the above reg  */
1647	reg_offset = last_nibble % 4;
1648
1649	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1650
1651	if (queue == -1) {
1652		/* Clear accepts frame bit at specified unicast DA tbl entry */
1653		unicast_reg &= ~(0xff << (8 * reg_offset));
1654	} else {
1655		unicast_reg &= ~(0xff << (8 * reg_offset));
1656		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1657	}
1658
1659	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1660}
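
/* Worked example (editor's addition): for last_nibble = 0xb,
 * tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 11 % 4 = 3, so the
 * entry is byte 3 of the register at MVNETA_DA_FILT_UCAST_BASE + 8;
 * for a valid queue that byte is written as 0x01 | (queue << 1), and
 * for queue == -1 it is simply cleared.
 */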
1661
1662/* Set mac address */
1663static void mvneta_mac_addr_set(struct mvneta_port *pp,
1664				const unsigned char *addr, int queue)
1665{
1666	unsigned int mac_h;
1667	unsigned int mac_l;
1668
1669	if (queue != -1) {
1670		mac_l = (addr[4] << 8) | (addr[5]);
1671		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1672			(addr[2] << 8) | (addr[3] << 0);
1673
1674		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1675		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1676	}
1677
1678	/* Accept frames of this address */
1679	mvneta_set_ucast_addr(pp, addr[5], queue);
1680}
1681
1682/* Set the number of packets that will be received before an RX
1683 * interrupt is generated by the HW.
1684 */
1685static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1686				    struct mvneta_rx_queue *rxq, u32 value)
1687{
1688	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1689		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1690}
1691
1692/* Set the time delay in usec before an RX interrupt is generated by
1693 * the HW.
1694 */
1695static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1696				    struct mvneta_rx_queue *rxq, u32 value)
1697{
1698	u32 val;
1699	unsigned long clk_rate;
1700
1701	clk_rate = clk_get_rate(pp->clk);
1702	val = (clk_rate / 1000000) * value;
1703
1704	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1705}
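
/* Worked example (editor's addition): assuming a 250 MHz core clock,
 * a requested delay of value = 100 usec programs
 * (250000000 / 1000000) * 100 = 25000 clock cycles into
 * MVNETA_RXQ_TIME_COAL_REG; the actual rate is read at runtime from
 * clk_get_rate(pp->clk).
 */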
1706
1707/* Set threshold for TX_DONE pkts coalescing */
1708static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1709					 struct mvneta_tx_queue *txq, u32 value)
1710{
1711	u32 val;
1712
1713	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1714
1715	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1716	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1717
1718	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1719}
1720
1721/* Handle rx descriptor fill by setting buf_phys_addr and buf_virt_addr */
1722static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1723				u32 phys_addr, void *virt_addr,
1724				struct mvneta_rx_queue *rxq)
1725{
1726	int i;
1727
1728	rx_desc->buf_phys_addr = phys_addr;
1729	i = rx_desc - rxq->descs;
1730	rxq->buf_virt_addr[i] = virt_addr;
1731}
1732
1733/* Decrement sent descriptors counter */
1734static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1735				     struct mvneta_tx_queue *txq,
1736				     int sent_desc)
1737{
1738	u32 val;
1739
1740	/* Only 255 TX descriptors can be updated at once */
1741	while (sent_desc > 0xff) {
1742		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1743		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1744		sent_desc = sent_desc - 0xff;
1745	}
1746
1747	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1748	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1749}
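
/* Worked example (editor's addition): for sent_desc = 600 the loop
 * above issues two writes of 255 (600 -> 345 -> 90) and the final
 * write decrements the remaining 90, since the DEC_SENT field can
 * only take values up to 0xff per write.
 */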
1750
1751/* Get number of TX descriptors already sent by HW */
1752static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1753					struct mvneta_tx_queue *txq)
1754{
1755	u32 val;
1756	int sent_desc;
1757
1758	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1759	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1760		MVNETA_TXQ_SENT_DESC_SHIFT;
1761
1762	return sent_desc;
1763}
1764
1765/* Get number of sent descriptors and decrement counter.
1766 *  The number of sent descriptors is returned.
1767 */
1768static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1769				     struct mvneta_tx_queue *txq)
1770{
1771	int sent_desc;
1772
1773	/* Get number of sent descriptors */
1774	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1775
1776	/* Decrement sent descriptors counter */
1777	if (sent_desc)
1778		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1779
1780	return sent_desc;
1781}
1782
1783/* Set TXQ descriptors fields relevant for CSUM calculation */
1784static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1785				int ip_hdr_len, int l4_proto)
1786{
1787	u32 command;
1788
1789	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1790	 * G_L4_chk, L4_type; required only for checksum
1791	 * calculation
1792	 */
1793	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1794	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1795
1796	if (l3_proto == htons(ETH_P_IP))
1797		command |= MVNETA_TXD_IP_CSUM;
1798	else
1799		command |= MVNETA_TX_L3_IP6;
1800
1801	if (l4_proto == IPPROTO_TCP)
1802		command |=  MVNETA_TX_L4_CSUM_FULL;
1803	else if (l4_proto == IPPROTO_UDP)
1804		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1805	else
1806		command |= MVNETA_TX_L4_CSUM_NOT;
1807
1808	return command;
1809}
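
/* Usage sketch (editor's addition): for an IPv4/TCP frame,
 * mvneta_skb_tx_csum() below calls this with
 * l3_offs = skb_network_offset(skb), l3_proto = htons(ETH_P_IP),
 * ip_hdr_len = ip4h->ihl and l4_proto = IPPROTO_TCP, so the returned
 * command carries MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL and the
 * controller generates both the IP and the TCP checksum.
 */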
1810
1811
1812/* Display more error info */
1813static void mvneta_rx_error(struct mvneta_port *pp,
1814			    struct mvneta_rx_desc *rx_desc)
1815{
1816	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1817	u32 status = rx_desc->status;
1818
1819	/* update per-cpu counter */
1820	u64_stats_update_begin(&stats->syncp);
1821	stats->rx_errors++;
1822	u64_stats_update_end(&stats->syncp);
1823
1824	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1825	case MVNETA_RXD_ERR_CRC:
1826		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1827			   status, rx_desc->data_size);
1828		break;
1829	case MVNETA_RXD_ERR_OVERRUN:
1830		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1831			   status, rx_desc->data_size);
1832		break;
1833	case MVNETA_RXD_ERR_LEN:
1834		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1835			   status, rx_desc->data_size);
1836		break;
1837	case MVNETA_RXD_ERR_RESOURCE:
1838		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1839			   status, rx_desc->data_size);
1840		break;
1841	}
1842}
1843
1844/* Handle RX checksum offload based on the descriptor's status */
1845static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1846{
1847	if ((pp->dev->features & NETIF_F_RXCSUM) &&
1848	    (status & MVNETA_RXD_L3_IP4) &&
1849	    (status & MVNETA_RXD_L4_CSUM_OK))
1850		return CHECKSUM_UNNECESSARY;
1851
1852	return CHECKSUM_NONE;
1853}
1854
1855/* Return tx queue pointer (find last set bit) according to <cause> returned
1856 * from the tx_done reg. <cause> must not be null. The return value is always
1857 * a valid queue, matching the highest-numbered queue set in <cause>.
1858 */
1859static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1860						     u32 cause)
1861{
1862	int queue = fls(cause) - 1;
1863
1864	return &pp->txqs[queue];
1865}
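
/* Worked example (editor's addition): for cause = 0x06 (TXQs 1 and 2
 * pending), fls(0x06) = 3 so the queue returned is &pp->txqs[2]; the
 * caller (mvneta_tx_done_gbe) then clears bit 2 in <cause> and loops,
 * picking up TXQ 1 on the next iteration.
 */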
1866
1867/* Free tx queue skbuffs */
1868static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1869				 struct mvneta_tx_queue *txq, int num,
1870				 struct netdev_queue *nq, bool napi)
1871{
1872	unsigned int bytes_compl = 0, pkts_compl = 0;
1873	struct xdp_frame_bulk bq;
1874	int i;
1875
1876	xdp_frame_bulk_init(&bq);
1877
1878	rcu_read_lock(); /* need for xdp_return_frame_bulk */
1879
1880	for (i = 0; i < num; i++) {
1881		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1882		struct mvneta_tx_desc *tx_desc = txq->descs +
1883			txq->txq_get_index;
1884
1885		mvneta_txq_inc_get(txq);
1886
1887		if (buf->type == MVNETA_TYPE_XDP_NDO ||
1888		    buf->type == MVNETA_TYPE_SKB)
1889			dma_unmap_single(pp->dev->dev.parent,
1890					 tx_desc->buf_phys_addr,
1891					 tx_desc->data_size, DMA_TO_DEVICE);
1892		if ((buf->type == MVNETA_TYPE_TSO ||
1893		     buf->type == MVNETA_TYPE_SKB) && buf->skb) {
1894			bytes_compl += buf->skb->len;
1895			pkts_compl++;
1896			dev_kfree_skb_any(buf->skb);
1897		} else if ((buf->type == MVNETA_TYPE_XDP_TX ||
1898			    buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
1899			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
1900				xdp_return_frame_rx_napi(buf->xdpf);
1901			else
1902				xdp_return_frame_bulk(buf->xdpf, &bq);
1903		}
1904	}
1905	xdp_flush_frame_bulk(&bq);
1906
1907	rcu_read_unlock();
1908
1909	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1910}
1911
1912/* Handle end of transmission */
1913static void mvneta_txq_done(struct mvneta_port *pp,
1914			   struct mvneta_tx_queue *txq)
1915{
1916	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1917	int tx_done;
1918
1919	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1920	if (!tx_done)
1921		return;
1922
1923	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1924
1925	txq->count -= tx_done;
1926
1927	if (netif_tx_queue_stopped(nq)) {
1928		if (txq->count <= txq->tx_wake_threshold)
1929			netif_tx_wake_queue(nq);
1930	}
1931}
1932
1933/* Refill processing for SW buffer management */
1934/* Allocate page per descriptor */
1935static int mvneta_rx_refill(struct mvneta_port *pp,
1936			    struct mvneta_rx_desc *rx_desc,
1937			    struct mvneta_rx_queue *rxq,
1938			    gfp_t gfp_mask)
1939{
1940	dma_addr_t phys_addr;
1941	struct page *page;
1942
1943	page = page_pool_alloc_pages(rxq->page_pool,
1944				     gfp_mask | __GFP_NOWARN);
1945	if (!page)
1946		return -ENOMEM;
1947
1948	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1949	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1950
1951	return 0;
1952}
1953
1954/* Handle tx checksum */
1955static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
1956{
1957	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1958		int ip_hdr_len = 0;
1959		__be16 l3_proto = vlan_get_protocol(skb);
1960		u8 l4_proto;
1961
1962		if (l3_proto == htons(ETH_P_IP)) {
1963			struct iphdr *ip4h = ip_hdr(skb);
1964
1965			/* Calculate IPv4 checksum and L4 checksum */
1966			ip_hdr_len = ip4h->ihl;
1967			l4_proto = ip4h->protocol;
1968		} else if (l3_proto == htons(ETH_P_IPV6)) {
1969			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1970
1971			/* Read l4_protocol from one of IPv6 extra headers */
1972			if (skb_network_header_len(skb) > 0)
1973				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1974			l4_proto = ip6h->nexthdr;
1975		} else
1976			return MVNETA_TX_L4_CSUM_NOT;
1977
1978		return mvneta_txq_desc_csum(skb_network_offset(skb),
1979					    l3_proto, ip_hdr_len, l4_proto);
1980	}
1981
1982	return MVNETA_TX_L4_CSUM_NOT;
1983}
1984
1985/* Drop packets received by the RXQ and free buffers */
1986static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1987				 struct mvneta_rx_queue *rxq)
1988{
1989	int rx_done, i;
1990
1991	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1992	if (rx_done)
1993		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1994
1995	if (pp->bm_priv) {
1996		for (i = 0; i < rx_done; i++) {
1997			struct mvneta_rx_desc *rx_desc =
1998						  mvneta_rxq_next_desc_get(rxq);
1999			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2000			struct mvneta_bm_pool *bm_pool;
2001
2002			bm_pool = &pp->bm_priv->bm_pools[pool_id];
2003			/* Return dropped buffer to the pool */
2004			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2005					      rx_desc->buf_phys_addr);
2006		}
2007		return;
2008	}
2009
2010	for (i = 0; i < rxq->size; i++) {
2011		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
2012		void *data = rxq->buf_virt_addr[i];
2013		if (!data || !(rx_desc->buf_phys_addr))
2014			continue;
2015
2016		page_pool_put_full_page(rxq->page_pool, data, false);
2017	}
2018	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
2019		xdp_rxq_info_unreg(&rxq->xdp_rxq);
2020	page_pool_destroy(rxq->page_pool);
2021	rxq->page_pool = NULL;
2022}
2023
2024static void
2025mvneta_update_stats(struct mvneta_port *pp,
2026		    struct mvneta_stats *ps)
2027{
2028	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2029
2030	u64_stats_update_begin(&stats->syncp);
2031	stats->es.ps.rx_packets += ps->rx_packets;
2032	stats->es.ps.rx_bytes += ps->rx_bytes;
2033	/* xdp */
2034	stats->es.ps.xdp_redirect += ps->xdp_redirect;
2035	stats->es.ps.xdp_pass += ps->xdp_pass;
2036	stats->es.ps.xdp_drop += ps->xdp_drop;
2037	u64_stats_update_end(&stats->syncp);
2038}
2039
2040static inline
2041int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2042{
2043	struct mvneta_rx_desc *rx_desc;
2044	int curr_desc = rxq->first_to_refill;
2045	int i;
2046
2047	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
2048		rx_desc = rxq->descs + curr_desc;
2049		if (!(rx_desc->buf_phys_addr)) {
2050			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2051				struct mvneta_pcpu_stats *stats;
2052
2053				pr_err("Can't refill queue %d. Done %d from %d\n",
2054				       rxq->id, i, rxq->refill_num);
2055
2056				stats = this_cpu_ptr(pp->stats);
2057				u64_stats_update_begin(&stats->syncp);
2058				stats->es.refill_error++;
2059				u64_stats_update_end(&stats->syncp);
2060				break;
2061			}
2062		}
2063		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2064	}
2065	rxq->refill_num -= i;
2066	rxq->first_to_refill = curr_desc;
2067
2068	return i;
2069}
2070
2071static void
2072mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2073		    struct xdp_buff *xdp, int sync_len)
2074{
2075	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2076	int i;
2077
2078	if (likely(!xdp_buff_has_frags(xdp)))
2079		goto out;
2080
2081	for (i = 0; i < sinfo->nr_frags; i++)
2082		page_pool_put_full_page(rxq->page_pool,
2083					skb_frag_page(&sinfo->frags[i]), true);
2084
2085out:
2086	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
2087			   sync_len, true);
2088}
2089
2090static int
2091mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2092			struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
2093{
2094	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2095	struct device *dev = pp->dev->dev.parent;
2096	struct mvneta_tx_desc *tx_desc;
2097	int i, num_frames = 1;
2098	struct page *page;
2099
2100	if (unlikely(xdp_frame_has_frags(xdpf)))
2101		num_frames += sinfo->nr_frags;
2102
2103	if (txq->count + num_frames >= txq->size)
2104		return MVNETA_XDP_DROPPED;
2105
2106	for (i = 0; i < num_frames; i++) {
2107		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2108		skb_frag_t *frag = NULL;
2109		int len = xdpf->len;
2110		dma_addr_t dma_addr;
2111
2112		if (unlikely(i)) { /* paged area */
2113			frag = &sinfo->frags[i - 1];
2114			len = skb_frag_size(frag);
2115		}
2116
2117		tx_desc = mvneta_txq_next_desc_get(txq);
2118		if (dma_map) {
2119			/* ndo_xdp_xmit */
2120			void *data;
2121
2122			data = unlikely(frag) ? skb_frag_address(frag)
2123					      : xdpf->data;
2124			dma_addr = dma_map_single(dev, data, len,
2125						  DMA_TO_DEVICE);
2126			if (dma_mapping_error(dev, dma_addr)) {
2127				mvneta_txq_desc_put(txq);
2128				goto unmap;
2129			}
2130
2131			buf->type = MVNETA_TYPE_XDP_NDO;
2132		} else {
2133			page = unlikely(frag) ? skb_frag_page(frag)
2134					      : virt_to_page(xdpf->data);
2135			dma_addr = page_pool_get_dma_addr(page);
2136			if (unlikely(frag))
2137				dma_addr += skb_frag_off(frag);
2138			else
2139				dma_addr += sizeof(*xdpf) + xdpf->headroom;
2140			dma_sync_single_for_device(dev, dma_addr, len,
2141						   DMA_BIDIRECTIONAL);
2142			buf->type = MVNETA_TYPE_XDP_TX;
2143		}
2144		buf->xdpf = unlikely(i) ? NULL : xdpf;
2145
2146		tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
2147		tx_desc->buf_phys_addr = dma_addr;
2148		tx_desc->data_size = len;
2149		*nxmit_byte += len;
2150
2151		mvneta_txq_inc_put(txq);
2152	}
2153	/* last descriptor */
2154	tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2155
2156	txq->pending += num_frames;
2157	txq->count += num_frames;
2158
2159	return MVNETA_XDP_TX;
2160
2161unmap:
2162	for (i--; i >= 0; i--) {
2163		mvneta_txq_desc_put(txq);
2164		tx_desc = txq->descs + txq->next_desc_to_proc;
2165		dma_unmap_single(dev, tx_desc->buf_phys_addr,
2166				 tx_desc->data_size,
2167				 DMA_TO_DEVICE);
2168	}
2169
2170	return MVNETA_XDP_DROPPED;
2171}
2172
2173static int
2174mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2175{
2176	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2177	struct mvneta_tx_queue *txq;
2178	struct netdev_queue *nq;
2179	int cpu, nxmit_byte = 0;
2180	struct xdp_frame *xdpf;
2181	u32 ret;
2182
2183	xdpf = xdp_convert_buff_to_frame(xdp);
2184	if (unlikely(!xdpf))
2185		return MVNETA_XDP_DROPPED;
2186
2187	cpu = smp_processor_id();
2188	txq = &pp->txqs[cpu % txq_number];
2189	nq = netdev_get_tx_queue(pp->dev, txq->id);
2190
2191	__netif_tx_lock(nq, cpu);
2192	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2193	if (ret == MVNETA_XDP_TX) {
2194		u64_stats_update_begin(&stats->syncp);
2195		stats->es.ps.tx_bytes += nxmit_byte;
2196		stats->es.ps.tx_packets++;
2197		stats->es.ps.xdp_tx++;
2198		u64_stats_update_end(&stats->syncp);
2199
2200		mvneta_txq_pend_desc_add(pp, txq, 0);
2201	} else {
2202		u64_stats_update_begin(&stats->syncp);
2203		stats->es.ps.xdp_tx_err++;
2204		u64_stats_update_end(&stats->syncp);
2205	}
2206	__netif_tx_unlock(nq);
2207
2208	return ret;
2209}
2210
2211static int
2212mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2213		struct xdp_frame **frames, u32 flags)
2214{
2215	struct mvneta_port *pp = netdev_priv(dev);
2216	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2217	int i, nxmit_byte = 0, nxmit = 0;
2218	int cpu = smp_processor_id();
2219	struct mvneta_tx_queue *txq;
2220	struct netdev_queue *nq;
2221	u32 ret;
2222
2223	if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2224		return -ENETDOWN;
2225
2226	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2227		return -EINVAL;
2228
2229	txq = &pp->txqs[cpu % txq_number];
2230	nq = netdev_get_tx_queue(pp->dev, txq->id);
2231
2232	__netif_tx_lock(nq, cpu);
2233	for (i = 0; i < num_frame; i++) {
2234		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
2235					      true);
2236		if (ret != MVNETA_XDP_TX)
2237			break;
2238
2239		nxmit++;
2240	}
2241
2242	if (unlikely(flags & XDP_XMIT_FLUSH))
2243		mvneta_txq_pend_desc_add(pp, txq, 0);
2244	__netif_tx_unlock(nq);
2245
2246	u64_stats_update_begin(&stats->syncp);
2247	stats->es.ps.tx_bytes += nxmit_byte;
2248	stats->es.ps.tx_packets += nxmit;
2249	stats->es.ps.xdp_xmit += nxmit;
2250	stats->es.ps.xdp_xmit_err += num_frame - nxmit;
2251	u64_stats_update_end(&stats->syncp);
2252
2253	return nxmit;
2254}
2255
2256static int
2257mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2258	       struct bpf_prog *prog, struct xdp_buff *xdp,
2259	       u32 frame_sz, struct mvneta_stats *stats)
2260{
2261	unsigned int len, data_len, sync;
2262	u32 ret, act;
2263
2264	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2265	data_len = xdp->data_end - xdp->data;
2266	act = bpf_prog_run_xdp(prog, xdp);
2267
2268	/* Due to xdp_adjust_tail: DMA sync for_device must cover the max len the CPU touched */
2269	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2270	sync = max(sync, len);
2271
2272	switch (act) {
2273	case XDP_PASS:
2274		stats->xdp_pass++;
2275		return MVNETA_XDP_PASS;
2276	case XDP_REDIRECT: {
2277		int err;
2278
2279		err = xdp_do_redirect(pp->dev, xdp, prog);
2280		if (unlikely(err)) {
2281			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2282			ret = MVNETA_XDP_DROPPED;
2283		} else {
2284			ret = MVNETA_XDP_REDIR;
2285			stats->xdp_redirect++;
2286		}
2287		break;
2288	}
2289	case XDP_TX:
2290		ret = mvneta_xdp_xmit_back(pp, xdp);
2291		if (ret != MVNETA_XDP_TX)
2292			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2293		break;
2294	default:
2295		bpf_warn_invalid_xdp_action(pp->dev, prog, act);
2296		fallthrough;
2297	case XDP_ABORTED:
2298		trace_xdp_exception(pp->dev, prog, act);
2299		fallthrough;
2300	case XDP_DROP:
2301		mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2302		ret = MVNETA_XDP_DROPPED;
2303		stats->xdp_drop++;
2304		break;
2305	}
2306
2307	stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
2308	stats->rx_packets++;
2309
2310	return ret;
2311}
2312
2313static void
2314mvneta_swbm_rx_frame(struct mvneta_port *pp,
2315		     struct mvneta_rx_desc *rx_desc,
2316		     struct mvneta_rx_queue *rxq,
2317		     struct xdp_buff *xdp, int *size,
2318		     struct page *page)
2319{
2320	unsigned char *data = page_address(page);
2321	int data_len = -MVNETA_MH_SIZE, len;
2322	struct net_device *dev = pp->dev;
2323	enum dma_data_direction dma_dir;
2324
2325	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2326		len = MVNETA_MAX_RX_BUF_SIZE;
2327		data_len += len;
2328	} else {
2329		len = *size;
2330		data_len += len - ETH_FCS_LEN;
2331	}
2332	*size = *size - len;
2333
2334	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2335	dma_sync_single_for_cpu(dev->dev.parent,
2336				rx_desc->buf_phys_addr,
2337				len, dma_dir);
2338
2339	rx_desc->buf_phys_addr = 0;
2340
2341	/* Prefetch header */
2342	prefetch(data);
2343	xdp_buff_clear_frags_flag(xdp);
2344	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2345			 data_len, false);
2346}
2347
2348static void
2349mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2350			    struct mvneta_rx_desc *rx_desc,
2351			    struct mvneta_rx_queue *rxq,
2352			    struct xdp_buff *xdp, int *size,
2353			    struct page *page)
2354{
2355	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2356	struct net_device *dev = pp->dev;
2357	enum dma_data_direction dma_dir;
2358	int data_len, len;
2359
2360	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2361		len = MVNETA_MAX_RX_BUF_SIZE;
2362		data_len = len;
2363	} else {
2364		len = *size;
2365		data_len = len - ETH_FCS_LEN;
2366	}
2367	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2368	dma_sync_single_for_cpu(dev->dev.parent,
2369				rx_desc->buf_phys_addr,
2370				len, dma_dir);
2371	rx_desc->buf_phys_addr = 0;
2372
2373	if (!xdp_buff_has_frags(xdp))
2374		sinfo->nr_frags = 0;
2375
2376	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
2377		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
2378
2379		skb_frag_fill_page_desc(frag, page,
2380					pp->rx_offset_correction, data_len);
2381
2382		if (!xdp_buff_has_frags(xdp)) {
2383			sinfo->xdp_frags_size = *size;
2384			xdp_buff_set_frags_flag(xdp);
2385		}
2386		if (page_is_pfmemalloc(page))
2387			xdp_buff_set_frag_pfmemalloc(xdp);
2388	} else {
2389		page_pool_put_full_page(rxq->page_pool, page, true);
2390	}
2391	*size -= len;
2392}
2393
2394static struct sk_buff *
2395mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2396		      struct xdp_buff *xdp, u32 desc_status)
2397{
2398	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2399	struct sk_buff *skb;
2400	u8 num_frags;
2401
2402	if (unlikely(xdp_buff_has_frags(xdp)))
2403		num_frags = sinfo->nr_frags;
2404
2405	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2406	if (!skb)
2407		return ERR_PTR(-ENOMEM);
2408
2409	skb_mark_for_recycle(skb);
2410
2411	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2412	skb_put(skb, xdp->data_end - xdp->data);
2413	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2414
2415	if (unlikely(xdp_buff_has_frags(xdp)))
2416		xdp_update_skb_shared_info(skb, num_frags,
2417					   sinfo->xdp_frags_size,
2418					   num_frags * xdp->frame_sz,
2419					   xdp_buff_is_frag_pfmemalloc(xdp));
2420
2421	return skb;
2422}
2423
2424/* Main rx processing when using software buffer management */
2425static int mvneta_rx_swbm(struct napi_struct *napi,
2426			  struct mvneta_port *pp, int budget,
2427			  struct mvneta_rx_queue *rxq)
2428{
2429	int rx_proc = 0, rx_todo, refill, size = 0;
2430	struct net_device *dev = pp->dev;
2431	struct mvneta_stats ps = {};
2432	struct bpf_prog *xdp_prog;
2433	u32 desc_status, frame_sz;
2434	struct xdp_buff xdp_buf;
2435
2436	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2437	xdp_buf.data_hard_start = NULL;
2438
2439	/* Get number of received packets */
2440	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2441
2442	xdp_prog = READ_ONCE(pp->xdp_prog);
2443
2444	/* Fairness NAPI loop */
2445	while (rx_proc < budget && rx_proc < rx_todo) {
2446		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2447		u32 rx_status, index;
2448		struct sk_buff *skb;
2449		struct page *page;
2450
2451		index = rx_desc - rxq->descs;
2452		page = (struct page *)rxq->buf_virt_addr[index];
2453
2454		rx_status = rx_desc->status;
2455		rx_proc++;
2456		rxq->refill_num++;
2457
2458		if (rx_status & MVNETA_RXD_FIRST_DESC) {
2459			/* Check errors only for FIRST descriptor */
2460			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2461				mvneta_rx_error(pp, rx_desc);
2462				goto next;
2463			}
2464
2465			size = rx_desc->data_size;
2466			frame_sz = size - ETH_FCS_LEN;
2467			desc_status = rx_status;
2468
2469			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2470					     &size, page);
2471		} else {
2472			if (unlikely(!xdp_buf.data_hard_start)) {
2473				rx_desc->buf_phys_addr = 0;
2474				page_pool_put_full_page(rxq->page_pool, page,
2475							true);
2476				goto next;
2477			}
2478
2479			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2480						    &size, page);
2481		} /* Middle or Last descriptor */
2482
2483		if (!(rx_status & MVNETA_RXD_LAST_DESC))
2484			/* no last descriptor this time */
2485			continue;
2486
2487		if (size) {
2488			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2489			goto next;
2490		}
2491
2492		if (xdp_prog &&
2493		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2494			goto next;
2495
2496		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2497		if (IS_ERR(skb)) {
2498			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2499
2500			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2501
2502			u64_stats_update_begin(&stats->syncp);
2503			stats->es.skb_alloc_error++;
2504			stats->rx_dropped++;
2505			u64_stats_update_end(&stats->syncp);
2506
2507			goto next;
2508		}
2509
2510		ps.rx_bytes += skb->len;
2511		ps.rx_packets++;
2512
2513		skb->protocol = eth_type_trans(skb, dev);
2514		napi_gro_receive(napi, skb);
2515next:
2516		xdp_buf.data_hard_start = NULL;
2517	}
2518
2519	if (xdp_buf.data_hard_start)
2520		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2521
2522	if (ps.xdp_redirect)
2523		xdp_do_flush();
2524
2525	if (ps.rx_packets)
2526		mvneta_update_stats(pp, &ps);
2527
2528	/* return some buffers to hardware queue, one at a time is too slow */
2529	refill = mvneta_rx_refill_queue(pp, rxq);
2530
2531	/* Update rxq management counters */
2532	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2533
2534	return ps.rx_packets;
2535}
2536
2537/* Main rx processing when using hardware buffer management */
2538static int mvneta_rx_hwbm(struct napi_struct *napi,
2539			  struct mvneta_port *pp, int rx_todo,
2540			  struct mvneta_rx_queue *rxq)
2541{
2542	struct net_device *dev = pp->dev;
2543	int rx_done;
2544	u32 rcvd_pkts = 0;
2545	u32 rcvd_bytes = 0;
2546
2547	/* Get number of received packets */
2548	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2549
2550	if (rx_todo > rx_done)
2551		rx_todo = rx_done;
2552
2553	rx_done = 0;
2554
2555	/* Fairness NAPI loop */
2556	while (rx_done < rx_todo) {
2557		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2558		struct mvneta_bm_pool *bm_pool = NULL;
2559		struct sk_buff *skb;
2560		unsigned char *data;
2561		dma_addr_t phys_addr;
2562		u32 rx_status, frag_size;
2563		int rx_bytes, err;
2564		u8 pool_id;
2565
2566		rx_done++;
2567		rx_status = rx_desc->status;
2568		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2569		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2570		phys_addr = rx_desc->buf_phys_addr;
2571		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2572		bm_pool = &pp->bm_priv->bm_pools[pool_id];
2573
2574		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2575		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2576err_drop_frame_ret_pool:
2577			/* Return the buffer to the pool */
2578			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2579					      rx_desc->buf_phys_addr);
2580err_drop_frame:
2581			mvneta_rx_error(pp, rx_desc);
2582			/* leave the descriptor untouched */
2583			continue;
2584		}
2585
2586		if (rx_bytes <= rx_copybreak) {
2587			/* better copy a small frame and not unmap the DMA region */
2588			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2589			if (unlikely(!skb))
2590				goto err_drop_frame_ret_pool;
2591
2592			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2593			                              rx_desc->buf_phys_addr,
2594			                              MVNETA_MH_SIZE + NET_SKB_PAD,
2595			                              rx_bytes,
2596			                              DMA_FROM_DEVICE);
2597			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2598				     rx_bytes);
2599
2600			skb->protocol = eth_type_trans(skb, dev);
2601			skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2602			napi_gro_receive(napi, skb);
2603
2604			rcvd_pkts++;
2605			rcvd_bytes += rx_bytes;
2606
2607			/* Return the buffer to the pool */
2608			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2609					      rx_desc->buf_phys_addr);
2610
2611			/* leave the descriptor and buffer untouched */
2612			continue;
2613		}
2614
2615		/* Refill processing */
2616		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2617		if (err) {
2618			struct mvneta_pcpu_stats *stats;
2619
2620			netdev_err(dev, "Linux processing - Can't refill\n");
2621
2622			stats = this_cpu_ptr(pp->stats);
2623			u64_stats_update_begin(&stats->syncp);
2624			stats->es.refill_error++;
2625			u64_stats_update_end(&stats->syncp);
2626
2627			goto err_drop_frame_ret_pool;
2628		}
2629
2630		frag_size = bm_pool->hwbm_pool.frag_size;
2631
2632		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2633
2634	/* After refill, the old buffer has to be unmapped regardless of
2635	 * whether the skb was successfully built or not.
2636		 */
2637		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2638				 bm_pool->buf_size, DMA_FROM_DEVICE);
2639		if (!skb)
2640			goto err_drop_frame;
2641
2642		rcvd_pkts++;
2643		rcvd_bytes += rx_bytes;
2644
2645		/* Linux processing */
2646		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2647		skb_put(skb, rx_bytes);
2648
2649		skb->protocol = eth_type_trans(skb, dev);
2650		skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2651
2652		napi_gro_receive(napi, skb);
2653	}
2654
2655	if (rcvd_pkts) {
2656		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2657
2658		u64_stats_update_begin(&stats->syncp);
2659		stats->es.ps.rx_packets += rcvd_pkts;
2660		stats->es.ps.rx_bytes += rcvd_bytes;
2661		u64_stats_update_end(&stats->syncp);
2662	}
2663
2664	/* Update rxq management counters */
2665	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2666
2667	return rx_done;
2668}
2669
2670static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
2671				 struct mvneta_tx_queue *txq)
2672{
2673	struct device *dev = pp->dev->dev.parent;
2674	int i;
2675
2676	for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) {
2677		if (txq->tso_hdrs[i]) {
2678			dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2679					  txq->tso_hdrs[i],
2680					  txq->tso_hdrs_phys[i]);
2681			txq->tso_hdrs[i] = NULL;
2682		}
2683	}
2684}
2685
2686static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
2687				 struct mvneta_tx_queue *txq)
2688{
2689	struct device *dev = pp->dev->dev.parent;
2690	int i, num;
2691
2692	num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE);
2693	for (i = 0; i < num; i++) {
2694		txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2695						      &txq->tso_hdrs_phys[i],
2696						      GFP_KERNEL);
2697		if (!txq->tso_hdrs[i]) {
2698			mvneta_free_tso_hdrs(pp, txq);
2699			return -ENOMEM;
2700		}
2701	}
2702
2703	return 0;
2704}
2705
2706static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
2707{
2708	int index, offset;
2709
2710	index = txq->txq_put_index / MVNETA_TSO_PER_PAGE;
2711	offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE;
2712
2713	*dma = txq->tso_hdrs_phys[index] + offset;
2714
2715	return txq->tso_hdrs[index] + offset;
2716}
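
/* Illustration (editor's addition): headers are packed
 * MVNETA_TSO_PER_PAGE to a page, TSO_HEADER_SIZE bytes apart. If, for
 * instance, MVNETA_TSO_PER_PAGE were 32, put index 70 would map to
 * page 2 at byte offset 6 * TSO_HEADER_SIZE; the value 32 is only an
 * assumption for the example, not taken from the driver headers.
 */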
2717
2718static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
2719			       struct tso_t *tso, int size, bool is_last)
2720{
2721	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2722	int hdr_len = skb_tcp_all_headers(skb);
2723	struct mvneta_tx_desc *tx_desc;
2724	dma_addr_t hdr_phys;
2725	char *hdr;
2726
2727	hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
2728	tso_build_hdr(skb, hdr, tso, size, is_last);
2729
2730	tx_desc = mvneta_txq_next_desc_get(txq);
2731	tx_desc->data_size = hdr_len;
2732	tx_desc->command = mvneta_skb_tx_csum(skb);
2733	tx_desc->command |= MVNETA_TXD_F_DESC;
2734	tx_desc->buf_phys_addr = hdr_phys;
2735	buf->type = MVNETA_TYPE_TSO;
2736	buf->skb = NULL;
2737
2738	mvneta_txq_inc_put(txq);
2739}
2740
2741static inline int
2742mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2743		    struct sk_buff *skb, char *data, int size,
2744		    bool last_tcp, bool is_last)
2745{
2746	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2747	struct mvneta_tx_desc *tx_desc;
2748
2749	tx_desc = mvneta_txq_next_desc_get(txq);
2750	tx_desc->data_size = size;
2751	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2752						size, DMA_TO_DEVICE);
2753	if (unlikely(dma_mapping_error(dev->dev.parent,
2754		     tx_desc->buf_phys_addr))) {
2755		mvneta_txq_desc_put(txq);
2756		return -ENOMEM;
2757	}
2758
2759	tx_desc->command = 0;
2760	buf->type = MVNETA_TYPE_SKB;
2761	buf->skb = NULL;
2762
2763	if (last_tcp) {
2764		/* last descriptor in the TCP packet */
2765		tx_desc->command = MVNETA_TXD_L_DESC;
2766
2767		/* last descriptor in SKB */
2768		if (is_last)
2769			buf->skb = skb;
2770	}
2771	mvneta_txq_inc_put(txq);
2772	return 0;
2773}
2774
2775static void mvneta_release_descs(struct mvneta_port *pp,
2776				 struct mvneta_tx_queue *txq,
2777				 int first, int num)
2778{
2779	int desc_idx, i;
2780
2781	desc_idx = first + num;
2782	if (desc_idx >= txq->size)
2783		desc_idx -= txq->size;
2784
2785	for (i = num; i >= 0; i--) {
2786		struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
2787		struct mvneta_tx_buf *buf = &txq->buf[desc_idx];
2788
2789		if (buf->type == MVNETA_TYPE_SKB)
2790			dma_unmap_single(pp->dev->dev.parent,
2791					 tx_desc->buf_phys_addr,
2792					 tx_desc->data_size,
2793					 DMA_TO_DEVICE);
2794
2795		mvneta_txq_desc_put(txq);
2796
2797		if (desc_idx == 0)
2798			desc_idx = txq->size;
2799		desc_idx -= 1;
2800	}
2801}
2802
2803static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2804			 struct mvneta_tx_queue *txq)
2805{
2806	int hdr_len, total_len, data_left;
2807	int first_desc, desc_count = 0;
2808	struct mvneta_port *pp = netdev_priv(dev);
2809	struct tso_t tso;
2810
2811	/* Count needed descriptors */
2812	if ((txq->count + tso_count_descs(skb)) >= txq->size)
2813		return 0;
2814
2815	if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
2816		pr_info("*** Is this even possible?\n");
2817		return 0;
2818	}
2819
2820	first_desc = txq->txq_put_index;
2821
2822	/* Initialize the TSO handler, and prepare the first payload */
2823	hdr_len = tso_start(skb, &tso);
2824
2825	total_len = skb->len - hdr_len;
2826	while (total_len > 0) {
2827		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2828		total_len -= data_left;
2829		desc_count++;
2830
2831		/* prepare packet headers: MAC + IP + TCP */
2832		mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
2833
2834		while (data_left > 0) {
2835			int size;
2836			desc_count++;
2837
2838			size = min_t(int, tso.size, data_left);
2839
2840			if (mvneta_tso_put_data(dev, txq, skb,
2841						 tso.data, size,
2842						 size == data_left,
2843						 total_len == 0))
2844				goto err_release;
2845			data_left -= size;
2846
2847			tso_build_data(skb, &tso, size);
2848		}
2849	}
2850
2851	return desc_count;
2852
2853err_release:
2854	/* Release all used data descriptors; header descriptors must not
2855	 * be DMA-unmapped.
2856	 */
2857	mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2858	return 0;
2859}
2860
2861/* Handle tx fragmentation processing */
2862static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2863				  struct mvneta_tx_queue *txq)
2864{
2865	struct mvneta_tx_desc *tx_desc;
2866	int i, nr_frags = skb_shinfo(skb)->nr_frags;
2867	int first_desc = txq->txq_put_index;
2868
2869	for (i = 0; i < nr_frags; i++) {
2870		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2871		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2872		void *addr = skb_frag_address(frag);
2873
2874		tx_desc = mvneta_txq_next_desc_get(txq);
2875		tx_desc->data_size = skb_frag_size(frag);
2876
2877		tx_desc->buf_phys_addr =
2878			dma_map_single(pp->dev->dev.parent, addr,
2879				       tx_desc->data_size, DMA_TO_DEVICE);
2880
2881		if (dma_mapping_error(pp->dev->dev.parent,
2882				      tx_desc->buf_phys_addr)) {
2883			mvneta_txq_desc_put(txq);
2884			goto error;
2885		}
2886
2887		if (i == nr_frags - 1) {
2888			/* Last descriptor */
2889			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2890			buf->skb = skb;
2891		} else {
2892			/* Descriptor in the middle: Not First, Not Last */
2893			tx_desc->command = 0;
2894			buf->skb = NULL;
2895		}
2896		buf->type = MVNETA_TYPE_SKB;
2897		mvneta_txq_inc_put(txq);
2898	}
2899
2900	return 0;
2901
2902error:
2903	/* Release all descriptors that were used to map fragments of
2904	 * this packet, as well as the corresponding DMA mappings
2905	 */
2906	mvneta_release_descs(pp, txq, first_desc, i - 1);
2907	return -ENOMEM;
2908}
2909
2910/* Main tx processing */
2911static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2912{
2913	struct mvneta_port *pp = netdev_priv(dev);
2914	u16 txq_id = skb_get_queue_mapping(skb);
2915	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2916	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2917	struct mvneta_tx_desc *tx_desc;
2918	int len = skb->len;
2919	int frags = 0;
2920	u32 tx_cmd;
2921
2922	if (!netif_running(dev))
2923		goto out;
2924
2925	if (skb_is_gso(skb)) {
2926		frags = mvneta_tx_tso(skb, dev, txq);
2927		goto out;
2928	}
2929
2930	frags = skb_shinfo(skb)->nr_frags + 1;
2931
2932	/* Get a descriptor for the first part of the packet */
2933	tx_desc = mvneta_txq_next_desc_get(txq);
2934
2935	tx_cmd = mvneta_skb_tx_csum(skb);
2936
2937	tx_desc->data_size = skb_headlen(skb);
2938
2939	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2940						tx_desc->data_size,
2941						DMA_TO_DEVICE);
2942	if (unlikely(dma_mapping_error(dev->dev.parent,
2943				       tx_desc->buf_phys_addr))) {
2944		mvneta_txq_desc_put(txq);
2945		frags = 0;
2946		goto out;
2947	}
2948
2949	buf->type = MVNETA_TYPE_SKB;
2950	if (frags == 1) {
2951		/* First and Last descriptor */
2952		tx_cmd |= MVNETA_TXD_FLZ_DESC;
2953		tx_desc->command = tx_cmd;
2954		buf->skb = skb;
2955		mvneta_txq_inc_put(txq);
2956	} else {
2957		/* First but not Last */
2958		tx_cmd |= MVNETA_TXD_F_DESC;
2959		buf->skb = NULL;
2960		mvneta_txq_inc_put(txq);
2961		tx_desc->command = tx_cmd;
2962		/* Continue with other skb fragments */
2963		if (mvneta_tx_frag_process(pp, skb, txq)) {
2964			dma_unmap_single(dev->dev.parent,
2965					 tx_desc->buf_phys_addr,
2966					 tx_desc->data_size,
2967					 DMA_TO_DEVICE);
2968			mvneta_txq_desc_put(txq);
2969			frags = 0;
2970			goto out;
2971		}
2972	}
2973
2974out:
2975	if (frags > 0) {
2976		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2977		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2978
2979		netdev_tx_sent_queue(nq, len);
2980
2981		txq->count += frags;
2982		if (txq->count >= txq->tx_stop_threshold)
2983			netif_tx_stop_queue(nq);
2984
2985		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2986		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2987			mvneta_txq_pend_desc_add(pp, txq, frags);
2988		else
2989			txq->pending += frags;
2990
2991		u64_stats_update_begin(&stats->syncp);
2992		stats->es.ps.tx_bytes += len;
2993		stats->es.ps.tx_packets++;
2994		u64_stats_update_end(&stats->syncp);
2995	} else {
2996		dev->stats.tx_dropped++;
2997		dev_kfree_skb_any(skb);
2998	}
2999
3000	return NETDEV_TX_OK;
3001}
3002
3003
3004/* Free tx resources, when resetting a port */
3005static void mvneta_txq_done_force(struct mvneta_port *pp,
3006				  struct mvneta_tx_queue *txq)
3007
3008{
3009	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3010	int tx_done = txq->count;
3011
3012	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3013
3014	/* reset txq */
3015	txq->count = 0;
3016	txq->txq_put_index = 0;
3017	txq->txq_get_index = 0;
3018}
3019
3020/* Handle tx done - called in softirq context. The <cause_tx_done> argument
3021 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
3022 */
3023static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
3024{
3025	struct mvneta_tx_queue *txq;
3026	struct netdev_queue *nq;
3027	int cpu = smp_processor_id();
3028
3029	while (cause_tx_done) {
3030		txq = mvneta_tx_done_policy(pp, cause_tx_done);
3031
3032		nq = netdev_get_tx_queue(pp->dev, txq->id);
3033		__netif_tx_lock(nq, cpu);
3034
3035		if (txq->count)
3036			mvneta_txq_done(pp, txq);
3037
3038		__netif_tx_unlock(nq);
3039		cause_tx_done &= ~((1 << txq->id));
3040	}
3041}
3042
3043/* Compute the CRC-8 of the specified address, using a unique algorithm
3044 * defined by the hw spec, different from the generic CRC-8 algorithm.
3045 */
3046static int mvneta_addr_crc(unsigned char *addr)
3047{
3048	int crc = 0;
3049	int i;
3050
3051	for (i = 0; i < ETH_ALEN; i++) {
3052		int j;
3053
3054		crc = (crc ^ addr[i]) << 8;
3055		for (j = 7; j >= 0; j--) {
3056			if (crc & (0x100 << j))
3057				crc ^= 0x107 << j;
3058		}
3059	}
3060
3061	return crc;
3062}
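
/* Note (editor's addition): the reduction above with 0x107 corresponds
 * to the CRC-8 generator polynomial x^8 + x^2 + x + 1, processed
 * MSB-first with a zero initial value; per the comment above it
 * differs from the kernel's generic crc8 helpers, which is why it is
 * open-coded here.
 */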
3063
3064/* This method controls the net device special MAC multicast support.
3065 * The Special Multicast Table for MAC addresses supports MAC of the form
3066 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3067 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3068 * Table entries in the DA-Filter table. This method sets the
3069 * appropriate Special Multicast Table entry.
3070 */
3071static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
3072					  unsigned char last_byte,
3073					  int queue)
3074{
3075	unsigned int smc_table_reg;
3076	unsigned int tbl_offset;
3077	unsigned int reg_offset;
3078
3079	/* Register offset from SMC table base    */
3080	tbl_offset = (last_byte / 4);
3081	/* Entry offset within the above reg */
3082	reg_offset = last_byte % 4;
3083
3084	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
3085					+ tbl_offset * 4));
3086
3087	if (queue == -1)
3088		smc_table_reg &= ~(0xff << (8 * reg_offset));
3089	else {
3090		smc_table_reg &= ~(0xff << (8 * reg_offset));
3091		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3092	}
3093
3094	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
3095		    smc_table_reg);
3096}
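
/* Worked example (editor's addition): for DA 01:00:5e:00:00:2a,
 * last_byte = 0x2a = 42, so tbl_offset = 42 / 4 = 10 and
 * reg_offset = 42 % 4 = 2; the entry is byte 2 of the SMC table
 * register at MVNETA_DA_FILT_SPEC_MCAST + 40, written as
 * 0x01 | (queue << 1) or cleared for queue == -1.
 */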
3097
3098/* This method controls the network device Other MAC multicast support.
3099 * The Other Multicast Table is used for multicast of another type.
3100 * A CRC-8 is used as an index to the Other Multicast Table entries
3101 * in the DA-Filter table.
3102 * The method gets the CRC-8 value from the calling routine and
3103 * sets the appropriate Other Multicast Table entry according to the
3104 * specified CRC-8.
3105 */
3106static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
3107					unsigned char crc8,
3108					int queue)
3109{
3110	unsigned int omc_table_reg;
3111	unsigned int tbl_offset;
3112	unsigned int reg_offset;
3113
3114	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
3115	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
3116
3117	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
3118
3119	if (queue == -1) {
3120		/* Clear accepts frame bit at specified Other DA table entry */
3121		omc_table_reg &= ~(0xff << (8 * reg_offset));
3122	} else {
3123		omc_table_reg &= ~(0xff << (8 * reg_offset));
3124		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3125	}
3126
3127	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3128}
3129
3130/* The network device supports multicast using two tables:
3131 *    1) Special Multicast Table for MAC addresses of the form
3132 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3133 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3134 *       Table entries in the DA-Filter table.
3135 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
3136 *       is used as an index to the Other Multicast Table entries in the
3137 *       DA-Filter table.
3138 */
3139static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3140				 int queue)
3141{
3142	unsigned char crc_result = 0;
3143
3144	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
3145		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3146		return 0;
3147	}
3148
3149	crc_result = mvneta_addr_crc(p_addr);
3150	if (queue == -1) {
3151		if (pp->mcast_count[crc_result] == 0) {
3152			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3153				    crc_result);
3154			return -EINVAL;
3155		}
3156
3157		pp->mcast_count[crc_result]--;
3158		if (pp->mcast_count[crc_result] != 0) {
3159			netdev_info(pp->dev,
3160				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
3161				    pp->mcast_count[crc_result], crc_result);
3162			return -EINVAL;
3163		}
3164	} else
3165		pp->mcast_count[crc_result]++;
3166
3167	mvneta_set_other_mcast_addr(pp, crc_result, queue);
3168
3169	return 0;
3170}
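
/* Usage sketch (editor's addition): an address such as
 * 01:00:5e:00:00:2a matches the "\x01\x00\x5e\x00\x00" prefix and is
 * filed in the Special Multicast Table keyed on its last byte, while
 * any other multicast DA (e.g. 33:33:00:00:00:01) is hashed with
 * mvneta_addr_crc() and counted in pp->mcast_count[] before the Other
 * Multicast Table entry is updated.
 */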
3171
3172/* Configure the filtering mode of the Ethernet port */
3173static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3174					  int is_promisc)
3175{
3176	u32 port_cfg_reg, val;
3177
3178	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3179
3180	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3181
3182	/* Set / Clear UPM bit in port configuration register */
3183	if (is_promisc) {
3184		/* Accept all Unicast addresses */
3185		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
3186		val |= MVNETA_FORCE_UNI;
3187		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3188		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3189	} else {
3190		/* Reject all Unicast addresses */
3191		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
3192		val &= ~MVNETA_FORCE_UNI;
3193	}
3194
3195	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3196	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3197}
3198
3199/* register unicast and multicast addresses */
3200static void mvneta_set_rx_mode(struct net_device *dev)
3201{
3202	struct mvneta_port *pp = netdev_priv(dev);
3203	struct netdev_hw_addr *ha;
3204
3205	if (dev->flags & IFF_PROMISC) {
3206		/* Accept all: Multicast + Unicast */
3207		mvneta_rx_unicast_promisc_set(pp, 1);
3208		mvneta_set_ucast_table(pp, pp->rxq_def);
3209		mvneta_set_special_mcast_table(pp, pp->rxq_def);
3210		mvneta_set_other_mcast_table(pp, pp->rxq_def);
3211	} else {
3212		/* Accept single Unicast */
3213		mvneta_rx_unicast_promisc_set(pp, 0);
3214		mvneta_set_ucast_table(pp, -1);
3215		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3216
3217		if (dev->flags & IFF_ALLMULTI) {
3218			/* Accept all multicast */
3219			mvneta_set_special_mcast_table(pp, pp->rxq_def);
3220			mvneta_set_other_mcast_table(pp, pp->rxq_def);
3221		} else {
3222			/* Accept only initialized multicast */
3223			mvneta_set_special_mcast_table(pp, -1);
3224			mvneta_set_other_mcast_table(pp, -1);
3225
3226			if (!netdev_mc_empty(dev)) {
3227				netdev_for_each_mc_addr(ha, dev) {
3228					mvneta_mcast_addr_set(pp, ha->addr,
3229							      pp->rxq_def);
3230				}
3231			}
3232		}
3233	}
3234}
3235
3236/* Interrupt handling - the callback for request_irq() */
3237static irqreturn_t mvneta_isr(int irq, void *dev_id)
3238{
3239	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3240
3241	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3242	napi_schedule(&pp->napi);
3243
3244	return IRQ_HANDLED;
3245}
3246
3247/* Interrupt handling - the callback for request_percpu_irq() */
3248static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3249{
3250	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3251
3252	disable_percpu_irq(port->pp->dev->irq);
3253	napi_schedule(&port->napi);
3254
3255	return IRQ_HANDLED;
3256}
3257
3258static void mvneta_link_change(struct mvneta_port *pp)
3259{
3260	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3261
3262	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3263}
3264
3265/* NAPI handler
3266 * Bits 0 - 7 of the causeRxTx register indicate that packets were
3267 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
3268 * Bits 8 - 15 of the causeRxTx register indicate that packets were
3269 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
3270 * Each CPU has its own causeRxTx register
3271 */
3272static int mvneta_poll(struct napi_struct *napi, int budget)
3273{
3274	int rx_done = 0;
3275	u32 cause_rx_tx;
3276	int rx_queue;
3277	struct mvneta_port *pp = netdev_priv(napi->dev);
3278	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3279
3280	if (!netif_running(pp->dev)) {
3281		napi_complete(napi);
3282		return rx_done;
3283	}
3284
3285	/* Read cause register */
3286	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3287	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3288		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3289
3290		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3291
3292		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3293				  MVNETA_CAUSE_LINK_CHANGE))
3294			mvneta_link_change(pp);
3295	}
3296
3297	/* Release Tx descriptors */
3298	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3299		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3300		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3301	}
3302
3303	/* For the case where the last mvneta_poll did not process all
3304	 * RX packets
3305	 */
3306	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3307		port->cause_rx_tx;
3308
3309	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3310	if (rx_queue) {
3311		rx_queue = rx_queue - 1;
3312		if (pp->bm_priv)
3313			rx_done = mvneta_rx_hwbm(napi, pp, budget,
3314						 &pp->rxqs[rx_queue]);
3315		else
3316			rx_done = mvneta_rx_swbm(napi, pp, budget,
3317						 &pp->rxqs[rx_queue]);
3318	}
3319
3320	if (rx_done < budget) {
3321		cause_rx_tx = 0;
3322		napi_complete_done(napi, rx_done);
3323
3324		if (pp->neta_armada3700) {
3325			unsigned long flags;
3326
3327			local_irq_save(flags);
3328			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3329				    MVNETA_RX_INTR_MASK(rxq_number) |
3330				    MVNETA_TX_INTR_MASK(txq_number) |
3331				    MVNETA_MISCINTR_INTR_MASK);
3332			local_irq_restore(flags);
3333		} else {
3334			enable_percpu_irq(pp->dev->irq, 0);
3335		}
3336	}
3337
3338	if (pp->neta_armada3700)
3339		pp->cause_rx_tx = cause_rx_tx;
3340	else
3341		port->cause_rx_tx = cause_rx_tx;
3342
3343	return rx_done;
3344}
3345
3346static int mvneta_create_page_pool(struct mvneta_port *pp,
3347				   struct mvneta_rx_queue *rxq, int size)
3348{
3349	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3350	struct page_pool_params pp_params = {
3351		.order = 0,
3352		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3353		.pool_size = size,
3354		.nid = NUMA_NO_NODE,
3355		.dev = pp->dev->dev.parent,
3356		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3357		.offset = pp->rx_offset_correction,
3358		.max_len = MVNETA_MAX_RX_BUF_SIZE,
3359	};
3360	int err;
3361
3362	rxq->page_pool = page_pool_create(&pp_params);
3363	if (IS_ERR(rxq->page_pool)) {
3364		err = PTR_ERR(rxq->page_pool);
3365		rxq->page_pool = NULL;
3366		return err;
3367	}
3368
3369	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
3370				 PAGE_SIZE);
3371	if (err < 0)
3372		goto err_free_pp;
3373
3374	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3375					 rxq->page_pool);
3376	if (err)
3377		goto err_unregister_rxq;
3378
3379	return 0;
3380
3381err_unregister_rxq:
3382	xdp_rxq_info_unreg(&rxq->xdp_rxq);
3383err_free_pp:
3384	page_pool_destroy(rxq->page_pool);
3385	rxq->page_pool = NULL;
3386	return err;
3387}
3388
3389/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3390static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3391			   int num)
3392{
3393	int i, err;
3394
3395	err = mvneta_create_page_pool(pp, rxq, num);
3396	if (err < 0)
3397		return err;
3398
3399	for (i = 0; i < num; i++) {
3400		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3401		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3402				     GFP_KERNEL) != 0) {
3403			netdev_err(pp->dev,
3404				   "%s:rxq %d, %d of %d buffs  filled\n",
3405				   __func__, rxq->id, i, num);
3406			break;
3407		}
3408	}
3409
3410	/* Add this number of RX descriptors as non occupied (ready to
3411	 * get packets)
3412	 */
3413	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3414
3415	return i;
3416}
3417
3418/* Free all packets pending transmit from all TXQs and reset TX port */
3419static void mvneta_tx_reset(struct mvneta_port *pp)
3420{
3421	int queue;
3422
3423	/* free the skb's in the tx ring */
3424	for (queue = 0; queue < txq_number; queue++)
3425		mvneta_txq_done_force(pp, &pp->txqs[queue]);
3426
3427	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3428	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3429}
3430
3431static void mvneta_rx_reset(struct mvneta_port *pp)
3432{
3433	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3434	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3435}
3436
3437/* Rx/Tx queue initialization/cleanup methods */
3438
3439static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3440			      struct mvneta_rx_queue *rxq)
3441{
3442	rxq->size = pp->rx_ring_size;
3443
3444	/* Allocate memory for RX descriptors */
3445	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3446					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3447					&rxq->descs_phys, GFP_KERNEL);
3448	if (!rxq->descs)
3449		return -ENOMEM;
3450
3451	rxq->last_desc = rxq->size - 1;
3452
3453	return 0;
3454}
3455
3456static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3457			       struct mvneta_rx_queue *rxq)
3458{
3459	/* Set Rx descriptors queue starting address */
3460	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3461	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3462
3463	/* Set coalescing pkts and time */
3464	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3465	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3466
3467	if (!pp->bm_priv) {
3468		/* Set Offset */
3469		mvneta_rxq_offset_set(pp, rxq, 0);
3470		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3471					MVNETA_MAX_RX_BUF_SIZE :
3472					MVNETA_RX_BUF_SIZE(pp->pkt_size));
3473		mvneta_rxq_bm_disable(pp, rxq);
3474		mvneta_rxq_fill(pp, rxq, rxq->size);
3475	} else {
3476		/* Set Offset */
3477		mvneta_rxq_offset_set(pp, rxq,
3478				      NET_SKB_PAD - pp->rx_offset_correction);
3479
3480		mvneta_rxq_bm_enable(pp, rxq);
3481		/* Fill RXQ with buffers from RX pool */
3482		mvneta_rxq_long_pool_set(pp, rxq);
3483		mvneta_rxq_short_pool_set(pp, rxq);
3484		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3485	}
3486}
3487
3488/* Create a specified RX queue */
3489static int mvneta_rxq_init(struct mvneta_port *pp,
3490			   struct mvneta_rx_queue *rxq)
3491
3492{
3493	int ret;
3494
3495	ret = mvneta_rxq_sw_init(pp, rxq);
3496	if (ret < 0)
3497		return ret;
3498
3499	mvneta_rxq_hw_init(pp, rxq);
3500
3501	return 0;
3502}
3503
3504/* Cleanup Rx queue */
3505static void mvneta_rxq_deinit(struct mvneta_port *pp,
3506			      struct mvneta_rx_queue *rxq)
3507{
3508	mvneta_rxq_drop_pkts(pp, rxq);
3509
3510	if (rxq->descs)
3511		dma_free_coherent(pp->dev->dev.parent,
3512				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3513				  rxq->descs,
3514				  rxq->descs_phys);
3515
3516	rxq->descs             = NULL;
3517	rxq->last_desc         = 0;
3518	rxq->next_desc_to_proc = 0;
3519	rxq->descs_phys        = 0;
3520	rxq->first_to_refill   = 0;
3521	rxq->refill_num        = 0;
3522}
3523
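/* Allocate the TX descriptor ring, per-descriptor buffer info and TSO
 * header buffers, and set up the XPS CPU mapping for this queue.
 */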
3524static int mvneta_txq_sw_init(struct mvneta_port *pp,
3525			      struct mvneta_tx_queue *txq)
3526{
3527	int cpu, err;
3528
3529	txq->size = pp->tx_ring_size;
3530
3531	/* A queue must always have room for at least one skb.
3532	 * Therefore, stop the queue when the number of free entries
3533	 * reaches the maximum number of descriptors per skb.
3534	 */
3535	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3536	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3537
3538	/* Allocate memory for TX descriptors */
3539	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3540					txq->size * MVNETA_DESC_ALIGNED_SIZE,
3541					&txq->descs_phys, GFP_KERNEL);
3542	if (!txq->descs)
3543		return -ENOMEM;
3544
3545	txq->last_desc = txq->size - 1;
3546
3547	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3548	if (!txq->buf)
3549		return -ENOMEM;
3550
3551	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3552	err = mvneta_alloc_tso_hdrs(pp, txq);
3553	if (err)
3554		return err;
3555
3556	/* Setup XPS mapping */
3557	if (pp->neta_armada3700)
3558		cpu = 0;
3559	else if (txq_number > 1)
3560		cpu = txq->id % num_present_cpus();
3561	else
3562		cpu = pp->rxq_def % num_present_cpus();
3563	cpumask_set_cpu(cpu, &txq->affinity_mask);
3564	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3565
3566	return 0;
3567}
3568
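/* Program the TX queue hardware: bandwidth tokens, ring base address and
 * size, and TX-done coalescing.
 */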
3569static void mvneta_txq_hw_init(struct mvneta_port *pp,
3570			       struct mvneta_tx_queue *txq)
3571{
3572	/* Set maximum bandwidth for enabled TXQs */
3573	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3574	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3575
3576	/* Set Tx descriptors queue starting address */
3577	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3578	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3579
3580	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3581}
3582
3583/* Create and initialize a tx queue */
3584static int mvneta_txq_init(struct mvneta_port *pp,
3585			   struct mvneta_tx_queue *txq)
3586{
3587	int ret;
3588
3589	ret = mvneta_txq_sw_init(pp, txq);
3590	if (ret < 0)
3591		return ret;
3592
3593	mvneta_txq_hw_init(pp, txq);
3594
3595	return 0;
3596}
3597
3598/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3599static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3600				 struct mvneta_tx_queue *txq)
3601{
3602	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3603
3604	kfree(txq->buf);
3605
3606	mvneta_free_tso_hdrs(pp, txq);
3607	if (txq->descs)
3608		dma_free_coherent(pp->dev->dev.parent,
3609				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
3610				  txq->descs, txq->descs_phys);
3611
3612	netdev_tx_reset_queue(nq);
3613
3614	txq->buf               = NULL;
3615	txq->descs             = NULL;
3616	txq->last_desc         = 0;
3617	txq->next_desc_to_proc = 0;
3618	txq->descs_phys        = 0;
3619}
3620
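/* Clear the TX queue hardware registers: bandwidth tokens and ring base/size */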
3621static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3622				 struct mvneta_tx_queue *txq)
3623{
3624	/* Set minimum bandwidth for disabled TXQs */
3625	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3626	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3627
3628	/* Set Tx descriptors queue starting address and size */
3629	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3630	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3631}
3632
3633static void mvneta_txq_deinit(struct mvneta_port *pp,
3634			      struct mvneta_tx_queue *txq)
3635{
3636	mvneta_txq_sw_deinit(pp, txq);
3637	mvneta_txq_hw_deinit(pp, txq);
3638}
3639
3640/* Cleanup all Tx queues */
3641static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3642{
3643	int queue;
3644
3645	for (queue = 0; queue < txq_number; queue++)
3646		mvneta_txq_deinit(pp, &pp->txqs[queue]);
3647}
3648
3649/* Cleanup all Rx queues */
3650static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3651{
3652	int queue;
3653
3654	for (queue = 0; queue < rxq_number; queue++)
3655		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3656}
3657
3658
3659/* Init all Rx queues */
3660static int mvneta_setup_rxqs(struct mvneta_port *pp)
3661{
3662	int queue;
3663
3664	for (queue = 0; queue < rxq_number; queue++) {
3665		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3666
3667		if (err) {
3668			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3669				   __func__, queue);
3670			mvneta_cleanup_rxqs(pp);
3671			return err;
3672		}
3673	}
3674
3675	return 0;
3676}
3677
3678/* Init all tx queues */
3679static int mvneta_setup_txqs(struct mvneta_port *pp)
3680{
3681	int queue;
3682
3683	for (queue = 0; queue < txq_number; queue++) {
3684		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3685		if (err) {
3686			netdev_err(pp->dev, "%s: can't create txq=%d\n",
3687				   __func__, queue);
3688			mvneta_cleanup_txqs(pp);
3689			return err;
3690		}
3691	}
3692
3693	return 0;
3694}
3695
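/* Set the common PHY (comphy) to Ethernet mode for the given interface and
 * power it on.
 */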
3696static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3697{
3698	int ret;
3699
3700	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3701	if (ret)
3702		return ret;
3703
3704	return phy_power_on(pp->comphy);
3705}
3706
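/* Select the SERDES configuration for the interface mode, either through
 * the comphy or by programming MVNETA_SERDES_CFG directly.
 */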
3707static int mvneta_config_interface(struct mvneta_port *pp,
3708				   phy_interface_t interface)
3709{
3710	int ret = 0;
3711
3712	if (pp->comphy) {
3713		if (interface == PHY_INTERFACE_MODE_SGMII ||
3714		    interface == PHY_INTERFACE_MODE_1000BASEX ||
3715		    interface == PHY_INTERFACE_MODE_2500BASEX) {
3716			ret = mvneta_comphy_init(pp, interface);
3717		}
3718	} else {
3719		switch (interface) {
3720		case PHY_INTERFACE_MODE_QSGMII:
3721			mvreg_write(pp, MVNETA_SERDES_CFG,
3722				    MVNETA_QSGMII_SERDES_PROTO);
3723			break;
3724
3725		case PHY_INTERFACE_MODE_SGMII:
3726		case PHY_INTERFACE_MODE_1000BASEX:
3727			mvreg_write(pp, MVNETA_SERDES_CFG,
3728				    MVNETA_SGMII_SERDES_PROTO);
3729			break;
3730
3731		case PHY_INTERFACE_MODE_2500BASEX:
3732			mvreg_write(pp, MVNETA_SERDES_CFG,
3733				    MVNETA_HSGMII_SERDES_PROTO);
3734			break;
3735		default:
3736			break;
3737		}
3738	}
3739
3740	pp->phy_interface = interface;
3741
3742	return ret;
3743}
3744
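/* Start the port: enable RX/TX, NAPI, interrupts, phylink and the TX queues */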
3745static void mvneta_start_dev(struct mvneta_port *pp)
3746{
3747	int cpu;
3748
3749	WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3750
3751	mvneta_max_rx_size_set(pp, pp->pkt_size);
3752	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3753
3754	/* start the Rx/Tx activity */
3755	mvneta_port_enable(pp);
3756
3757	if (!pp->neta_armada3700) {
3758		/* Enable polling on the port */
3759		for_each_online_cpu(cpu) {
3760			struct mvneta_pcpu_port *port =
3761				per_cpu_ptr(pp->ports, cpu);
3762
3763			napi_enable(&port->napi);
3764		}
3765	} else {
3766		napi_enable(&pp->napi);
3767	}
3768
3769	/* Unmask interrupts. It has to be done from each CPU */
3770	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3771
3772	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3773		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3774		    MVNETA_CAUSE_LINK_CHANGE);
3775
3776	phylink_start(pp->phylink);
3777
3778	/* We may have called phylink_speed_down before */
3779	phylink_speed_up(pp->phylink);
3780
3781	netif_tx_start_all_queues(pp->dev);
3782
3783	clear_bit(__MVNETA_DOWN, &pp->state);
3784}
3785
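/* Stop the port: quiesce phylink and NAPI, mask interrupts and reset the
 * RX/TX paths.
 */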
3786static void mvneta_stop_dev(struct mvneta_port *pp)
3787{
3788	unsigned int cpu;
3789
3790	set_bit(__MVNETA_DOWN, &pp->state);
3791
3792	if (device_may_wakeup(&pp->dev->dev))
3793		phylink_speed_down(pp->phylink, false);
3794
3795	phylink_stop(pp->phylink);
3796
3797	if (!pp->neta_armada3700) {
3798		for_each_online_cpu(cpu) {
3799			struct mvneta_pcpu_port *port =
3800				per_cpu_ptr(pp->ports, cpu);
3801
3802			napi_disable(&port->napi);
3803		}
3804	} else {
3805		napi_disable(&pp->napi);
3806	}
3807
3808	netif_carrier_off(pp->dev);
3809
3810	mvneta_port_down(pp);
3811	netif_tx_stop_all_queues(pp->dev);
3812
3813	/* Stop the port activity */
3814	mvneta_port_disable(pp);
3815
3816	/* Clear all ethernet port interrupts */
3817	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3818
3819	/* Mask all ethernet port interrupts */
3820	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3821
3822	mvneta_tx_reset(pp);
3823	mvneta_rx_reset(pp);
3824
3825	WARN_ON(phy_power_off(pp->comphy));
3826}
3827
3828static void mvneta_percpu_enable(void *arg)
3829{
3830	struct mvneta_port *pp = arg;
3831
3832	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3833}
3834
3835static void mvneta_percpu_disable(void *arg)
3836{
3837	struct mvneta_port *pp = arg;
3838
3839	disable_percpu_irq(pp->dev->irq);
3840}
3841
3842/* Change the device mtu */
3843static int mvneta_change_mtu(struct net_device *dev, int mtu)
3844{
3845	struct mvneta_port *pp = netdev_priv(dev);
3846	struct bpf_prog *prog = pp->xdp_prog;
3847	int ret;
3848
3849	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3850		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3851			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3852		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3853	}
3854
3855	if (prog && !prog->aux->xdp_has_frags &&
3856	    mtu > MVNETA_MAX_RX_BUF_SIZE) {
3857		netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
3858			    mtu);
3859
3860		return -EINVAL;
3861	}
3862
3863	dev->mtu = mtu;
3864
3865	if (!netif_running(dev)) {
3866		if (pp->bm_priv)
3867			mvneta_bm_update_mtu(pp, mtu);
3868
3869		netdev_update_features(dev);
3870		return 0;
3871	}
3872
3873	/* The interface is running, so we have to force a
3874	 * reallocation of the queues
3875	 */
3876	mvneta_stop_dev(pp);
3877	on_each_cpu(mvneta_percpu_disable, pp, true);
3878
3879	mvneta_cleanup_txqs(pp);
3880	mvneta_cleanup_rxqs(pp);
3881
3882	if (pp->bm_priv)
3883		mvneta_bm_update_mtu(pp, mtu);
3884
3885	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3886
3887	ret = mvneta_setup_rxqs(pp);
3888	if (ret) {
3889		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3890		return ret;
3891	}
3892
3893	ret = mvneta_setup_txqs(pp);
3894	if (ret) {
3895		netdev_err(dev, "unable to setup txqs after MTU change\n");
3896		return ret;
3897	}
3898
3899	on_each_cpu(mvneta_percpu_enable, pp, true);
3900	mvneta_start_dev(pp);
3901
3902	netdev_update_features(dev);
3903
3904	return 0;
3905}
3906
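/* Drop TX checksum and TSO offload when the MTU exceeds the hardware
 * checksum limit.
 */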
3907static netdev_features_t mvneta_fix_features(struct net_device *dev,
3908					     netdev_features_t features)
3909{
3910	struct mvneta_port *pp = netdev_priv(dev);
3911
3912	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3913		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3914		netdev_info(dev,
3915			    "Disable IP checksum for MTU greater than %dB\n",
3916			    pp->tx_csum_limit);
3917	}
3918
3919	return features;
3920}
3921
3922/* Get mac address */
3923static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3924{
3925	u32 mac_addr_l, mac_addr_h;
3926
3927	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3928	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3929	addr[0] = (mac_addr_h >> 24) & 0xFF;
3930	addr[1] = (mac_addr_h >> 16) & 0xFF;
3931	addr[2] = (mac_addr_h >> 8) & 0xFF;
3932	addr[3] = mac_addr_h & 0xFF;
3933	addr[4] = (mac_addr_l >> 8) & 0xFF;
3934	addr[5] = mac_addr_l & 0xFF;
3935}
3936
3937/* Handle setting mac address */
3938static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3939{
3940	struct mvneta_port *pp = netdev_priv(dev);
3941	struct sockaddr *sockaddr = addr;
3942	int ret;
3943
3944	ret = eth_prepare_mac_addr_change(dev, addr);
3945	if (ret < 0)
3946		return ret;
3947	/* Remove previous address table entry */
3948	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3949
3950	/* Set new addr in hw */
3951	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3952
3953	eth_commit_mac_addr_change(dev, addr);
3954	return 0;
3955}
3956
3957static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
3958{
3959	return container_of(pcs, struct mvneta_port, phylink_pcs);
3960}
3961
3962static int mvneta_pcs_validate(struct phylink_pcs *pcs,
3963			       unsigned long *supported,
3964			       const struct phylink_link_state *state)
3965{
3966	/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
3967	 * When in 802.3z mode, we must have AN enabled:
3968	 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
3969	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
3970	 */
3971	if (phy_interface_mode_is_8023z(state->interface) &&
3972	    !phylink_test(state->advertising, Autoneg))
3973		return -EINVAL;
3974
3975	return 0;
3976}
3977
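/* Report link, speed, duplex, pause and AN status from the GMAC status
 * register.
 */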
3978static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
3979				 struct phylink_link_state *state)
3980{
3981	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
3982	u32 gmac_stat;
3983
3984	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3985
3986	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3987		state->speed =
3988			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3989			SPEED_2500 : SPEED_1000;
3990	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3991		state->speed = SPEED_100;
3992	else
3993		state->speed = SPEED_10;
3994
3995	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3996	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3997	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3998
3999	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
4000		state->pause |= MLO_PAUSE_RX;
4001	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
4002		state->pause |= MLO_PAUSE_TX;
4003}
4004
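/* Configure in-band AN or forced mode in MVNETA_GMAC_AUTONEG_CONFIG; the
 * return value tells phylink whether the advertisement bits changed.
 */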
4005static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
4006			     phy_interface_t interface,
4007			     const unsigned long *advertising,
4008			     bool permit_pause_to_mac)
4009{
4010	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4011	u32 mask, val, an, old_an, changed;
4012
4013	mask = MVNETA_GMAC_INBAND_AN_ENABLE |
4014	       MVNETA_GMAC_INBAND_RESTART_AN |
4015	       MVNETA_GMAC_AN_SPEED_EN |
4016	       MVNETA_GMAC_AN_FLOW_CTRL_EN |
4017	       MVNETA_GMAC_AN_DUPLEX_EN;
4018
4019	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
4020		mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
4021			MVNETA_GMAC_CONFIG_GMII_SPEED |
4022			MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4023		val = MVNETA_GMAC_INBAND_AN_ENABLE;
4024
4025		if (interface == PHY_INTERFACE_MODE_SGMII) {
4026			/* SGMII mode receives the speed and duplex from PHY */
4027			val |= MVNETA_GMAC_AN_SPEED_EN |
4028			       MVNETA_GMAC_AN_DUPLEX_EN;
4029		} else {
4030			/* 802.3z mode has fixed speed and duplex */
4031			val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
4032			       MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4033
4034			/* The FLOW_CTRL_EN bit selects whether the GMAC pause
4035			 * mode is controlled automatically by the hardware or
4036			 * manually via the CONFIG_FLOW_CTRL bit.
4037			 */
4038			if (permit_pause_to_mac)
4039				val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
4040
4041			/* Update the advertisement bits */
4042			mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
4043			if (phylink_test(advertising, Pause))
4044				val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
4045		}
4046	} else {
4047		/* Phy or fixed speed - disable in-band AN modes */
4048		val = 0;
4049	}
4050
4051	old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4052	an = (an & ~mask) | val;
4053	changed = old_an ^ an;
4054	if (changed)
4055		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
4056
4057	/* We are only interested in the advertisement bits changing */
4058	return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
4059}
4060
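/* Restart in-band autonegotiation by pulsing the AN restart bit */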
4061static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
4062{
4063	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4064	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4065
4066	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4067		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
4068	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4069		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
4070}
4071
4072static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
4073	.pcs_validate = mvneta_pcs_validate,
4074	.pcs_get_state = mvneta_pcs_get_state,
4075	.pcs_config = mvneta_pcs_config,
4076	.pcs_an_restart = mvneta_pcs_an_restart,
4077};
4078
4079static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
4080						 phy_interface_t interface)
4081{
4082	struct net_device *ndev = to_net_dev(config->dev);
4083	struct mvneta_port *pp = netdev_priv(ndev);
4084
4085	return &pp->phylink_pcs;
4086}
4087
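/* phylink mac_prepare: force the link down (and power off the comphy when
 * the interface changes) before reconfiguring the port mode.
 */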
4088static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
4089			      phy_interface_t interface)
4090{
4091	struct net_device *ndev = to_net_dev(config->dev);
4092	struct mvneta_port *pp = netdev_priv(ndev);
4093	u32 val;
4094
4095	if (pp->phy_interface != interface ||
4096	    phylink_autoneg_inband(mode)) {
4097		/* Force the link down when changing the interface or if in
4098		 * in-band mode. According to Armada 370 documentation, we
4099		 * can only change the port mode and in-band enable when the
4100		 * link is down.
4101		 */
4102		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4103		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4104		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4105		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4106	}
4107
4108	if (pp->phy_interface != interface)
4109		WARN_ON(phy_power_off(pp->comphy));
4110
4111	/* Enable the 1ms clock */
4112	if (phylink_autoneg_inband(mode)) {
4113		unsigned long rate = clk_get_rate(pp->clk);
4114
4115		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
4116			    MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
4117	}
4118
4119	return 0;
4120}
4121
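/* phylink mac_config: program GMAC_CTRL_0/2/4 for the selected interface
 * and negotiation mode.
 */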
4122static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
4123			      const struct phylink_link_state *state)
4124{
4125	struct net_device *ndev = to_net_dev(config->dev);
4126	struct mvneta_port *pp = netdev_priv(ndev);
4127	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
4128	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4129	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
4130
4131	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
4132	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
4133				   MVNETA_GMAC2_PORT_RESET);
4134	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
4135
4136	/* Even though it might look weird, when we're configured in
4137	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
4138	 */
4139	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
4140
4141	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
4142	    state->interface == PHY_INTERFACE_MODE_SGMII ||
4143	    phy_interface_mode_is_8023z(state->interface))
4144		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
4145
4146	if (!phylink_autoneg_inband(mode)) {
4147		/* Phy or fixed speed - nothing to do, leave the
4148		 * configured speed, duplex and flow control as-is.
4149		 */
4150	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
4151		/* SGMII mode receives the state from the PHY */
4152		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
4153	} else {
4154		/* 802.3z negotiation - only 1000base-X */
4155		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
4156	}
4157
4158	/* When at 2.5G, the link partner can send frames with shortened
4159	 * preambles.
4160	 */
4161	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
4162		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
4163
4164	if (new_ctrl0 != gmac_ctrl0)
4165		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4166	if (new_ctrl2 != gmac_ctrl2)
4167		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4168	if (new_ctrl4 != gmac_ctrl4)
4169		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4170
4171	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
4172		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4173			MVNETA_GMAC2_PORT_RESET) != 0)
4174			continue;
4175	}
4176}
4177
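/* phylink mac_finish: reconfigure the SERDES if the interface changed and
 * allow the link to come up.
 */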
4178static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
4179			     phy_interface_t interface)
4180{
4181	struct net_device *ndev = to_net_dev(config->dev);
4182	struct mvneta_port *pp = netdev_priv(ndev);
4183	u32 val, clk;
4184
4185	/* Disable 1ms clock if not in in-band mode */
4186	if (!phylink_autoneg_inband(mode)) {
4187		clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
4188		clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
4189		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
4190	}
4191
4192	if (pp->phy_interface != interface)
4193		/* Enable the Serdes PHY */
4194		WARN_ON(mvneta_config_interface(pp, interface));
4195
4196	/* Allow the link to come up if in in-band mode, otherwise the
4197	 * link is forced via mac_link_down()/mac_link_up()
4198	 */
4199	if (phylink_autoneg_inband(mode)) {
4200		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4201		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
4202		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4203	}
4204
4205	return 0;
4206}
4207
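/* Enable or disable LPI (EEE) requests in the LPI control register */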
4208static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
4209{
4210	u32 lpi_ctl1;
4211
4212	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4213	if (enable)
4214		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
4215	else
4216		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
4217	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4218}
4219
4220static void mvneta_mac_link_down(struct phylink_config *config,
4221				 unsigned int mode, phy_interface_t interface)
4222{
4223	struct net_device *ndev = to_net_dev(config->dev);
4224	struct mvneta_port *pp = netdev_priv(ndev);
4225	u32 val;
4226
4227	mvneta_port_down(pp);
4228
4229	if (!phylink_autoneg_inband(mode)) {
4230		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4231		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4232		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4233		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4234	}
4235
4236	pp->eee_active = false;
4237	mvneta_set_eee(pp, false);
4238}
4239
4240static void mvneta_mac_link_up(struct phylink_config *config,
4241			       struct phy_device *phy,
4242			       unsigned int mode, phy_interface_t interface,
4243			       int speed, int duplex,
4244			       bool tx_pause, bool rx_pause)
4245{
4246	struct net_device *ndev = to_net_dev(config->dev);
4247	struct mvneta_port *pp = netdev_priv(ndev);
4248	u32 val;
4249
4250	if (!phylink_autoneg_inband(mode)) {
4251		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4252		val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
4253			 MVNETA_GMAC_CONFIG_MII_SPEED |
4254			 MVNETA_GMAC_CONFIG_GMII_SPEED |
4255			 MVNETA_GMAC_CONFIG_FLOW_CTRL |
4256			 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
4257		val |= MVNETA_GMAC_FORCE_LINK_PASS;
4258
4259		if (speed == SPEED_1000 || speed == SPEED_2500)
4260			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
4261		else if (speed == SPEED_100)
4262			val |= MVNETA_GMAC_CONFIG_MII_SPEED;
4263
4264		if (duplex == DUPLEX_FULL)
4265			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4266
4267		if (tx_pause || rx_pause)
4268			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4269
4270		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4271	} else {
4272		/* When inband doesn't cover flow control or flow control is
4273		 * disabled, we need to manually configure it. This bit will
4274		 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
4275		 */
4276		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4277		val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
4278
4279		if (tx_pause || rx_pause)
4280			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4281
4282		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4283	}
4284
4285	mvneta_port_up(pp);
4286
4287	if (phy && pp->eee_enabled) {
4288		pp->eee_active = phy_init_eee(phy, false) >= 0;
4289		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4290	}
4291}
4292
4293static const struct phylink_mac_ops mvneta_phylink_ops = {
4294	.mac_select_pcs = mvneta_mac_select_pcs,
4295	.mac_prepare = mvneta_mac_prepare,
4296	.mac_config = mvneta_mac_config,
4297	.mac_finish = mvneta_mac_finish,
4298	.mac_link_down = mvneta_mac_link_down,
4299	.mac_link_up = mvneta_mac_link_up,
4300};
4301
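/* Connect the PHY through phylink and propagate its Wake-on-LAN capability
 * to the device.
 */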
4302static int mvneta_mdio_probe(struct mvneta_port *pp)
4303{
4304	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
4305	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4306
4307	if (err)
4308		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4309
4310	phylink_ethtool_get_wol(pp->phylink, &wol);
4311	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4312
4313	/* PHY WoL may be enabled but device wakeup disabled */
4314	if (wol.supported)
4315		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4316
4317	return err;
4318}
4319
4320static void mvneta_mdio_remove(struct mvneta_port *pp)
4321{
4322	phylink_disconnect_phy(pp->phylink);
4323}
4324
4325/* Electing a CPU must be done in an atomic way: it should be done
4326 * either before or after the removal/insertion of a CPU, and this
4327 * function is not reentrant.
4328 */
4329static void mvneta_percpu_elect(struct mvneta_port *pp)
4330{
4331	int elected_cpu = 0, max_cpu, cpu;
4332
4333	/* Use the CPU associated with the default rxq when it is online;
4334	 * in all other cases, use CPU 0, which can't be offline.
4335	 */
4336	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4337		elected_cpu = pp->rxq_def;
4338
4339	max_cpu = num_present_cpus();
4340
4341	for_each_online_cpu(cpu) {
4342		int rxq_map = 0, txq_map = 0;
4343		int rxq;
4344
4345		for (rxq = 0; rxq < rxq_number; rxq++)
4346			if ((rxq % max_cpu) == cpu)
4347				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4348
4349		if (cpu == elected_cpu)
4350			/* Map the default receive queue to the elected CPU */
4351			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4352
4353		/* We update the TX queue map only if we have one
4354		 * queue. In this case we associate the TX queue to
4355		 * the CPU bound to the default RX queue
4356		 */
4357		if (txq_number == 1)
4358			txq_map = (cpu == elected_cpu) ?
4359				MVNETA_CPU_TXQ_ACCESS(0) : 0;
4360		else
4361			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4362				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
4363
4364		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4365
4366		/* Update the interrupt mask on each CPU according to the
4367		 * new mapping.
4368		 */
4369		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
4370					 pp, true);
4371	}
4372};
4373
4374static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4375{
4376	int other_cpu;
4377	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4378						  node_online);
4379	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4380
4381	/* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
4382	 * are routed to CPU 0, so we don't need all the cpu-hotplug support
4383	 */
4384	if (pp->neta_armada3700)
4385		return 0;
4386
4387	spin_lock(&pp->lock);
4388	/*
4389	 * Configuring the driver for a new CPU while the driver is
4390	 * stopping is racy, so just avoid it.
4391	 */
4392	if (pp->is_stopped) {
4393		spin_unlock(&pp->lock);
4394		return 0;
4395	}
4396	netif_tx_stop_all_queues(pp->dev);
4397
4398	/*
4399	 * We have to synchronise on the napi of each CPU except the one
4400	 * just being woken up.
4401	 */
4402	for_each_online_cpu(other_cpu) {
4403		if (other_cpu != cpu) {
4404			struct mvneta_pcpu_port *other_port =
4405				per_cpu_ptr(pp->ports, other_cpu);
4406
4407			napi_synchronize(&other_port->napi);
4408		}
4409	}
4410
4411	/* Mask all ethernet port interrupts */
4412	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4413	napi_enable(&port->napi);
4414
4415	/*
4416	 * Enable per-CPU interrupts on the CPU that is
4417	 * brought up.
4418	 */
4419	mvneta_percpu_enable(pp);
4420
4421	/*
4422	 * Re-elect the CPU that handles the default RX queue and update
4423	 * the per-CPU queue mapping accordingly.
4424	 */
4425	mvneta_percpu_elect(pp);
4426
4427	/* Unmask all ethernet port interrupts */
4428	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4429	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4430		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4431		    MVNETA_CAUSE_LINK_CHANGE);
4432	netif_tx_start_all_queues(pp->dev);
4433	spin_unlock(&pp->lock);
4434	return 0;
4435}
4436
4437static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4438{
4439	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4440						  node_online);
4441	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4442
4443	/*
4444	 * Thanks to this lock we are sure that any pending cpu election is
4445	 * done.
4446	 */
4447	spin_lock(&pp->lock);
4448	/* Mask all ethernet port interrupts */
4449	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4450	spin_unlock(&pp->lock);
4451
4452	napi_synchronize(&port->napi);
4453	napi_disable(&port->napi);
4454	/* Disable per-CPU interrupts on the CPU that is brought down. */
4455	mvneta_percpu_disable(pp);
4456	return 0;
4457}
4458
4459static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4460{
4461	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4462						  node_dead);
4463
4464	/* Check if a new CPU must be elected now that this one is down */
4465	spin_lock(&pp->lock);
4466	mvneta_percpu_elect(pp);
4467	spin_unlock(&pp->lock);
4468	/* Unmask all ethernet port interrupts */
4469	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4470	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4471		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4472		    MVNETA_CAUSE_LINK_CHANGE);
4473	netif_tx_start_all_queues(pp->dev);
4474	return 0;
4475}
4476
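/* Open the device: set up RX/TX queues, request the (per-CPU) interrupt,
 * register CPU hotplug callbacks, probe the PHY and start the port.
 */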
4477static int mvneta_open(struct net_device *dev)
4478{
4479	struct mvneta_port *pp = netdev_priv(dev);
4480	int ret;
4481
4482	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4483
4484	ret = mvneta_setup_rxqs(pp);
4485	if (ret)
4486		return ret;
4487
4488	ret = mvneta_setup_txqs(pp);
4489	if (ret)
4490		goto err_cleanup_rxqs;
4491
4492	/* Connect to port interrupt line */
4493	if (pp->neta_armada3700)
4494		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4495				  dev->name, pp);
4496	else
4497		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4498					 dev->name, pp->ports);
4499	if (ret) {
4500		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4501		goto err_cleanup_txqs;
4502	}
4503
4504	if (!pp->neta_armada3700) {
4505		/* Enable the per-CPU interrupt on all the CPUs to handle our
4506		 * RX queue interrupts.
4507		 */
4508		on_each_cpu(mvneta_percpu_enable, pp, true);
4509
4510		pp->is_stopped = false;
4511		/* Register a CPU notifier to handle the case where our CPU
4512		 * might be taken offline.
4513		 */
4514		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
4515						       &pp->node_online);
4516		if (ret)
4517			goto err_free_irq;
4518
4519		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4520						       &pp->node_dead);
4521		if (ret)
4522			goto err_free_online_hp;
4523	}
4524
4525	ret = mvneta_mdio_probe(pp);
4526	if (ret < 0) {
4527		netdev_err(dev, "cannot probe MDIO bus\n");
4528		goto err_free_dead_hp;
4529	}
4530
4531	mvneta_start_dev(pp);
4532
4533	return 0;
4534
4535err_free_dead_hp:
4536	if (!pp->neta_armada3700)
4537		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4538						    &pp->node_dead);
4539err_free_online_hp:
4540	if (!pp->neta_armada3700)
4541		cpuhp_state_remove_instance_nocalls(online_hpstate,
4542						    &pp->node_online);
4543err_free_irq:
4544	if (pp->neta_armada3700) {
4545		free_irq(pp->dev->irq, pp);
4546	} else {
4547		on_each_cpu(mvneta_percpu_disable, pp, true);
4548		free_percpu_irq(pp->dev->irq, pp->ports);
4549	}
4550err_cleanup_txqs:
4551	mvneta_cleanup_txqs(pp);
4552err_cleanup_rxqs:
4553	mvneta_cleanup_rxqs(pp);
4554	return ret;
4555}
4556
4557/* Stop the port, free port interrupt line */
4558static int mvneta_stop(struct net_device *dev)
4559{
4560	struct mvneta_port *pp = netdev_priv(dev);
4561
4562	if (!pp->neta_armada3700) {
4563		/* Inform the notifiers that we are stopping, so they don't
4564		 * set up the driver for new CPUs. The CPU online notifier is
4565		 * protected by the same spinlock, so once we hold the lock
4566		 * the notifier work is done.
4567		 */
4568		spin_lock(&pp->lock);
4569		pp->is_stopped = true;
4570		spin_unlock(&pp->lock);
4571
4572		mvneta_stop_dev(pp);
4573		mvneta_mdio_remove(pp);
4574
4575		cpuhp_state_remove_instance_nocalls(online_hpstate,
4576						    &pp->node_online);
4577		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4578						    &pp->node_dead);
4579		on_each_cpu(mvneta_percpu_disable, pp, true);
4580		free_percpu_irq(dev->irq, pp->ports);
4581	} else {
4582		mvneta_stop_dev(pp);
4583		mvneta_mdio_remove(pp);
4584		free_irq(dev->irq, pp);
4585	}
4586
4587	mvneta_cleanup_rxqs(pp);
4588	mvneta_cleanup_txqs(pp);
4589
4590	return 0;
4591}
4592
4593static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4594{
4595	struct mvneta_port *pp = netdev_priv(dev);
4596
4597	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4598}
4599
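/* Attach or detach an XDP program, restarting the port when the program
 * presence changes.
 */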
4600static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4601			    struct netlink_ext_ack *extack)
4602{
4603	bool need_update, running = netif_running(dev);
4604	struct mvneta_port *pp = netdev_priv(dev);
4605	struct bpf_prog *old_prog;
4606
4607	if (prog && !prog->aux->xdp_has_frags &&
4608	    dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4609		NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
4610		return -EOPNOTSUPP;
4611	}
4612
4613	if (pp->bm_priv) {
4614		NL_SET_ERR_MSG_MOD(extack,
4615				   "Hardware Buffer Management not supported on XDP");
4616		return -EOPNOTSUPP;
4617	}
4618
4619	need_update = !!pp->xdp_prog != !!prog;
4620	if (running && need_update)
4621		mvneta_stop(dev);
4622
4623	old_prog = xchg(&pp->xdp_prog, prog);
4624	if (old_prog)
4625		bpf_prog_put(old_prog);
4626
4627	if (running && need_update)
4628		return mvneta_open(dev);
4629
4630	return 0;
4631}
4632
4633static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4634{
4635	switch (xdp->command) {
4636	case XDP_SETUP_PROG:
4637		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4638	default:
4639		return -EINVAL;
4640	}
4641}
4642
4643/* Ethtool methods */
4644
4645/* Set link ksettings (phy address, speed) for ethtool */
4646static int
4647mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
4648				  const struct ethtool_link_ksettings *cmd)
4649{
4650	struct mvneta_port *pp = netdev_priv(ndev);
4651
4652	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4653}
4654
4655/* Get link ksettings for ethtool */
4656static int
4657mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
4658				  struct ethtool_link_ksettings *cmd)
4659{
4660	struct mvneta_port *pp = netdev_priv(ndev);
4661
4662	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4663}
4664
4665static int mvneta_ethtool_nway_reset(struct net_device *dev)
4666{
4667	struct mvneta_port *pp = netdev_priv(dev);
4668
4669	return phylink_ethtool_nway_reset(pp->phylink);
4670}
4671
4672/* Set interrupt coalescing for ethtool */
4673static int
4674mvneta_ethtool_set_coalesce(struct net_device *dev,
4675			    struct ethtool_coalesce *c,
4676			    struct kernel_ethtool_coalesce *kernel_coal,
4677			    struct netlink_ext_ack *extack)
4678{
4679	struct mvneta_port *pp = netdev_priv(dev);
4680	int queue;
4681
4682	for (queue = 0; queue < rxq_number; queue++) {
4683		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4684		rxq->time_coal = c->rx_coalesce_usecs;
4685		rxq->pkts_coal = c->rx_max_coalesced_frames;
4686		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4687		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4688	}
4689
4690	for (queue = 0; queue < txq_number; queue++) {
4691		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4692		txq->done_pkts_coal = c->tx_max_coalesced_frames;
4693		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4694	}
4695
4696	return 0;
4697}
4698
4699/* Get interrupt coalescing for ethtool */
4700static int
4701mvneta_ethtool_get_coalesce(struct net_device *dev,
4702			    struct ethtool_coalesce *c,
4703			    struct kernel_ethtool_coalesce *kernel_coal,
4704			    struct netlink_ext_ack *extack)
4705{
4706	struct mvneta_port *pp = netdev_priv(dev);
4707
4708	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
4709	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
4710
4711	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4712	return 0;
4713}
4714
4715
4716static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4717				    struct ethtool_drvinfo *drvinfo)
4718{
4719	strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4720		sizeof(drvinfo->driver));
4721	strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4722		sizeof(drvinfo->version));
4723	strscpy(drvinfo->bus_info, dev_name(&dev->dev),
4724		sizeof(drvinfo->bus_info));
4725}
4726
4727
4728static void
4729mvneta_ethtool_get_ringparam(struct net_device *netdev,
4730			     struct ethtool_ringparam *ring,
4731			     struct kernel_ethtool_ringparam *kernel_ring,
4732			     struct netlink_ext_ack *extack)
4733{
4734	struct mvneta_port *pp = netdev_priv(netdev);
4735
4736	ring->rx_max_pending = MVNETA_MAX_RXD;
4737	ring->tx_max_pending = MVNETA_MAX_TXD;
4738	ring->rx_pending = pp->rx_ring_size;
4739	ring->tx_pending = pp->tx_ring_size;
4740}
4741
4742static int
4743mvneta_ethtool_set_ringparam(struct net_device *dev,
4744			     struct ethtool_ringparam *ring,
4745			     struct kernel_ethtool_ringparam *kernel_ring,
4746			     struct netlink_ext_ack *extack)
4747{
4748	struct mvneta_port *pp = netdev_priv(dev);
4749
4750	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4751		return -EINVAL;
4752	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4753		ring->rx_pending : MVNETA_MAX_RXD;
4754
4755	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4756				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4757	if (pp->tx_ring_size != ring->tx_pending)
4758		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4759			    pp->tx_ring_size, ring->tx_pending);
4760
4761	if (netif_running(dev)) {
4762		mvneta_stop(dev);
4763		if (mvneta_open(dev)) {
4764			netdev_err(dev,
4765				   "error on opening device after ring param change\n");
4766			return -ENOMEM;
4767		}
4768	}
4769
4770	return 0;
4771}
4772
4773static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4774					  struct ethtool_pauseparam *pause)
4775{
4776	struct mvneta_port *pp = netdev_priv(dev);
4777
4778	phylink_ethtool_get_pauseparam(pp->phylink, pause);
4779}
4780
4781static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4782					 struct ethtool_pauseparam *pause)
4783{
4784	struct mvneta_port *pp = netdev_priv(dev);
4785
4786	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4787}
4788
4789static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4790				       u8 *data)
4791{
4792	if (sset == ETH_SS_STATS) {
4793		struct mvneta_port *pp = netdev_priv(netdev);
4794		int i;
4795
4796		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4797			memcpy(data + i * ETH_GSTRING_LEN,
4798			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
4799
4800		if (!pp->bm_priv) {
4801			data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
4802			page_pool_ethtool_stats_get_strings(data);
4803		}
4804	}
4805}
4806
4807static void
4808mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4809				 struct mvneta_ethtool_stats *es)
4810{
4811	unsigned int start;
4812	int cpu;
4813
4814	for_each_possible_cpu(cpu) {
4815		struct mvneta_pcpu_stats *stats;
4816		u64 skb_alloc_error;
4817		u64 refill_error;
4818		u64 xdp_redirect;
4819		u64 xdp_xmit_err;
4820		u64 xdp_tx_err;
4821		u64 xdp_pass;
4822		u64 xdp_drop;
4823		u64 xdp_xmit;
4824		u64 xdp_tx;
4825
4826		stats = per_cpu_ptr(pp->stats, cpu);
4827		do {
4828			start = u64_stats_fetch_begin(&stats->syncp);
4829			skb_alloc_error = stats->es.skb_alloc_error;
4830			refill_error = stats->es.refill_error;
4831			xdp_redirect = stats->es.ps.xdp_redirect;
4832			xdp_pass = stats->es.ps.xdp_pass;
4833			xdp_drop = stats->es.ps.xdp_drop;
4834			xdp_xmit = stats->es.ps.xdp_xmit;
4835			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
4836			xdp_tx = stats->es.ps.xdp_tx;
4837			xdp_tx_err = stats->es.ps.xdp_tx_err;
4838		} while (u64_stats_fetch_retry(&stats->syncp, start));
4839
4840		es->skb_alloc_error += skb_alloc_error;
4841		es->refill_error += refill_error;
4842		es->ps.xdp_redirect += xdp_redirect;
4843		es->ps.xdp_pass += xdp_pass;
4844		es->ps.xdp_drop += xdp_drop;
4845		es->ps.xdp_xmit += xdp_xmit;
4846		es->ps.xdp_xmit_err += xdp_xmit_err;
4847		es->ps.xdp_tx += xdp_tx;
4848		es->ps.xdp_tx_err += xdp_tx_err;
4849	}
4850}
4851
4852static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4853{
4854	struct mvneta_ethtool_stats stats = {};
4855	const struct mvneta_statistic *s;
4856	void __iomem *base = pp->base;
4857	u32 high, low;
4858	u64 val;
4859	int i;
4860
4861	mvneta_ethtool_update_pcpu_stats(pp, &stats);
4862	for (i = 0, s = mvneta_statistics;
4863	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4864	     s++, i++) {
4865		switch (s->type) {
4866		case T_REG_32:
4867			val = readl_relaxed(base + s->offset);
4868			pp->ethtool_stats[i] += val;
4869			break;
4870		case T_REG_64:
4871			/* Docs say to read low 32-bit then high */
4872			low = readl_relaxed(base + s->offset);
4873			high = readl_relaxed(base + s->offset + 4);
4874			val = (u64)high << 32 | low;
4875			pp->ethtool_stats[i] += val;
4876			break;
4877		case T_SW:
4878			switch (s->offset) {
4879			case ETHTOOL_STAT_EEE_WAKEUP:
4880				val = phylink_get_eee_err(pp->phylink);
4881				pp->ethtool_stats[i] += val;
4882				break;
4883			case ETHTOOL_STAT_SKB_ALLOC_ERR:
4884				pp->ethtool_stats[i] = stats.skb_alloc_error;
4885				break;
4886			case ETHTOOL_STAT_REFILL_ERR:
4887				pp->ethtool_stats[i] = stats.refill_error;
4888				break;
4889			case ETHTOOL_XDP_REDIRECT:
4890				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4891				break;
4892			case ETHTOOL_XDP_PASS:
4893				pp->ethtool_stats[i] = stats.ps.xdp_pass;
4894				break;
4895			case ETHTOOL_XDP_DROP:
4896				pp->ethtool_stats[i] = stats.ps.xdp_drop;
4897				break;
4898			case ETHTOOL_XDP_TX:
4899				pp->ethtool_stats[i] = stats.ps.xdp_tx;
4900				break;
4901			case ETHTOOL_XDP_TX_ERR:
4902				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4903				break;
4904			case ETHTOOL_XDP_XMIT:
4905				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4906				break;
4907			case ETHTOOL_XDP_XMIT_ERR:
4908				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4909				break;
4910			}
4911			break;
4912		}
4913	}
4914}
4915
4916static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
4917{
4918	struct page_pool_stats stats = {};
4919	int i;
4920
4921	for (i = 0; i < rxq_number; i++) {
4922		if (pp->rxqs[i].page_pool)
4923			page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
4924	}
4925
4926	page_pool_ethtool_stats_get(data, &stats);
4927}
4928
4929static void mvneta_ethtool_get_stats(struct net_device *dev,
4930				     struct ethtool_stats *stats, u64 *data)
4931{
4932	struct mvneta_port *pp = netdev_priv(dev);
4933	int i;
4934
4935	mvneta_ethtool_update_stats(pp);
4936
4937	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4938		*data++ = pp->ethtool_stats[i];
4939
4940	if (!pp->bm_priv)
4941		mvneta_ethtool_pp_stats(pp, data);
4942}
4943
4944static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4945{
4946	if (sset == ETH_SS_STATS) {
4947		int count = ARRAY_SIZE(mvneta_statistics);
4948		struct mvneta_port *pp = netdev_priv(dev);
4949
4950		if (!pp->bm_priv)
4951			count += page_pool_ethtool_stats_get_count();
4952
4953		return count;
4954	}
4955
4956	return -EOPNOTSUPP;
4957}
4958
4959static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4960{
4961	return MVNETA_RSS_LU_TABLE_SIZE;
4962}
4963
4964static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4965				    struct ethtool_rxnfc *info,
4966				    u32 *rules __always_unused)
4967{
4968	switch (info->cmd) {
4969	case ETHTOOL_GRXRINGS:
4970		info->data = rxq_number;
4971		return 0;
4972	case ETHTOOL_GRXFH:
4973		return -EOPNOTSUPP;
4974	default:
4975		return -EOPNOTSUPP;
4976	}
4977}
4978
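/* Apply a new RSS indirection table: quiesce NAPI, update rxq_def and
 * re-elect the CPU that owns the default RX queue.
 */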
4979static int mvneta_config_rss(struct mvneta_port *pp)
4980{
4981	int cpu;
4982	u32 val;
4983
4984	netif_tx_stop_all_queues(pp->dev);
4985
4986	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4987
4988	if (!pp->neta_armada3700) {
4989		/* We have to synchronise on the napi of each CPU */
4990		for_each_online_cpu(cpu) {
4991			struct mvneta_pcpu_port *pcpu_port =
4992				per_cpu_ptr(pp->ports, cpu);
4993
4994			napi_synchronize(&pcpu_port->napi);
4995			napi_disable(&pcpu_port->napi);
4996		}
4997	} else {
4998		napi_synchronize(&pp->napi);
4999		napi_disable(&pp->napi);
5000	}
5001
5002	pp->rxq_def = pp->indir[0];
5003
5004	/* Update unicast mapping */
5005	mvneta_set_rx_mode(pp->dev);
5006
5007	/* Update the port config register so that all RX queue types use the new default queue */
5008	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
5009	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
5010
5011	/* Update the elected CPU matching the new rxq_def */
5012	spin_lock(&pp->lock);
5013	mvneta_percpu_elect(pp);
5014	spin_unlock(&pp->lock);
5015
5016	if (!pp->neta_armada3700) {
5017		/* We have to synchronise on the napi of each CPU */
5018		for_each_online_cpu(cpu) {
5019			struct mvneta_pcpu_port *pcpu_port =
5020				per_cpu_ptr(pp->ports, cpu);
5021
5022			napi_enable(&pcpu_port->napi);
5023		}
5024	} else {
5025		napi_enable(&pp->napi);
5026	}
5027
5028	netif_tx_start_all_queues(pp->dev);
5029
5030	return 0;
5031}
5032
5033static int mvneta_ethtool_set_rxfh(struct net_device *dev,
5034				   struct ethtool_rxfh_param *rxfh,
5035				   struct netlink_ext_ack *extack)
5036{
5037	struct mvneta_port *pp = netdev_priv(dev);
5038
5039	/* Current code for Armada 3700 doesn't support RSS features yet */
5040	if (pp->neta_armada3700)
5041		return -EOPNOTSUPP;
5042
5043	/* We require at least one supported parameter to be changed
5044	 * and no change in any of the unsupported parameters
5045	 */
5046	if (rxfh->key ||
5047	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5048	     rxfh->hfunc != ETH_RSS_HASH_TOP))
5049		return -EOPNOTSUPP;
5050
5051	if (!rxfh->indir)
5052		return 0;
5053
5054	memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
5055
5056	return mvneta_config_rss(pp);
5057}
5058
5059static int mvneta_ethtool_get_rxfh(struct net_device *dev,
5060				   struct ethtool_rxfh_param *rxfh)
5061{
5062	struct mvneta_port *pp = netdev_priv(dev);
5063
5064	/* Current code for Armada 3700 doesn't support RSS features yet */
5065	if (pp->neta_armada3700)
5066		return -EOPNOTSUPP;
5067
5068	rxfh->hfunc = ETH_RSS_HASH_TOP;
5069
5070	if (!rxfh->indir)
5071		return 0;
5072
5073	memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
5074
5075	return 0;
5076}
5077
5078static void mvneta_ethtool_get_wol(struct net_device *dev,
5079				   struct ethtool_wolinfo *wol)
5080{
5081	struct mvneta_port *pp = netdev_priv(dev);
5082
5083	phylink_ethtool_get_wol(pp->phylink, wol);
5084}
5085
5086static int mvneta_ethtool_set_wol(struct net_device *dev,
5087				  struct ethtool_wolinfo *wol)
5088{
5089	struct mvneta_port *pp = netdev_priv(dev);
5090	int ret;
5091
5092	ret = phylink_ethtool_set_wol(pp->phylink, wol);
5093	if (!ret)
5094		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
5095
5096	return ret;
5097}
5098
5099static int mvneta_ethtool_get_eee(struct net_device *dev,
5100				  struct ethtool_eee *eee)
5101{
5102	struct mvneta_port *pp = netdev_priv(dev);
5103	u32 lpi_ctl0;
5104
5105	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
5106
5107	eee->eee_enabled = pp->eee_enabled;
5108	eee->eee_active = pp->eee_active;
5109	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
5110	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
5111
5112	return phylink_ethtool_get_eee(pp->phylink, eee);
5113}
5114
5115static int mvneta_ethtool_set_eee(struct net_device *dev,
5116				  struct ethtool_eee *eee)
5117{
5118	struct mvneta_port *pp = netdev_priv(dev);
5119	u32 lpi_ctl0;
5120
5121	/* The Armada 37x documents do not give limits for this other than
5122	 * it being an 8-bit register.
5123	 */
5124	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
5125		return -EINVAL;
5126
5127	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
5128	lpi_ctl0 &= ~(0xff << 8);
5129	lpi_ctl0 |= eee->tx_lpi_timer << 8;
5130	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
5131
5132	pp->eee_enabled = eee->eee_enabled;
5133	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
5134
5135	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
5136
5137	return phylink_ethtool_set_eee(pp->phylink, eee);
5138}
5139
5140static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
5141{
5142	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
5143}
5144
5145static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
5146{
5147	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
5148
5149	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
5150	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
5151
5152	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
5153}
5154
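/* Enable the per-TXQ egress rate limiter (bw limit algorithm v3) and
 * program the base bucket refill period from the core clock rate.
 */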
5155static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
5156{
5157	unsigned long core_clk_rate;
5158	u32 refill_cycles;
5159	u32 val;
5160
5161	core_clk_rate = clk_get_rate(pp->clk);
5162	if (!core_clk_rate)
5163		return -EINVAL;
5164
5165	refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
5166			(NSEC_PER_SEC / core_clk_rate);
5167
5168	if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
5169		return -EINVAL;
5170
5171	/* Enable bw limit algorithm version 3 */
5172	val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5173	val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
5174	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5175
5176	/* Set the base refill rate */
5177	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
5178
5179	return 0;
5180}
5181
5182static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
5183{
5184	u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5185
5186	val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
5187	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5188}
5189
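/* Program the token bucket refill value for one TX queue; only a maximum
 * rate (no minimum rate) is supported.
 */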
5190static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
5191				    u64 min_rate, u64 max_rate)
5192{
5193	u32 refill_val, rem;
5194	u32 val = 0;
5195
5196	/* Convert from Bps to bps */
5197	max_rate *= 8;
5198
5199	if (min_rate)
5200		return -EINVAL;
5201
5202	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
5203				 &rem);
5204
5205	if (rem || !refill_val ||
5206	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
5207		return -EINVAL;
5208
5209	val = refill_val;
5210	val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
5211		MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
5212
5213	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5214
5215	return 0;
5216}
5217
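/* Offload an mqprio configuration: map VLAN priorities (traffic classes)
 * to RX queues and optionally apply per-TXQ rate limits.
 */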
5218static int mvneta_setup_mqprio(struct net_device *dev,
5219			       struct tc_mqprio_qopt_offload *mqprio)
5220{
5221	struct mvneta_port *pp = netdev_priv(dev);
5222	int rxq, txq, tc, ret;
5223	u8 num_tc;
5224
5225	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
5226		return 0;
5227
5228	num_tc = mqprio->qopt.num_tc;
5229
5230	if (num_tc > rxq_number)
5231		return -EINVAL;
5232
5233	mvneta_clear_rx_prio_map(pp);
5234
5235	if (!num_tc) {
5236		mvneta_disable_per_queue_rate_limit(pp);
5237		netdev_reset_tc(dev);
5238		return 0;
5239	}
5240
5241	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
5242
5243	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
5244		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
5245				    mqprio->qopt.offset[tc]);
5246
5247		for (rxq = mqprio->qopt.offset[tc];
5248		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5249		     rxq++) {
5250			if (rxq >= rxq_number)
5251				return -EINVAL;
5252
5253			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
5254		}
5255	}
5256
5257	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
5258		mvneta_disable_per_queue_rate_limit(pp);
5259		return 0;
5260	}
5261
5262	if (mqprio->qopt.num_tc > txq_number)
5263		return -EINVAL;
5264
5265	ret = mvneta_enable_per_queue_rate_limit(pp);
5266	if (ret)
5267		return ret;
5268
5269	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
5270		for (txq = mqprio->qopt.offset[tc];
5271		     txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5272		     txq++) {
5273			if (txq >= txq_number)
5274				return -EINVAL;
5275
5276			ret = mvneta_setup_queue_rates(pp, txq,
5277						       mqprio->min_rate[tc],
5278						       mqprio->max_rate[tc]);
5279			if (ret)
5280				return ret;
5281		}
5282	}
5283
5284	return 0;
5285}
5286
5287static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
5288			   void *type_data)
5289{
5290	switch (type) {
5291	case TC_SETUP_QDISC_MQPRIO:
5292		return mvneta_setup_mqprio(dev, type_data);
5293	default:
5294		return -EOPNOTSUPP;
5295	}
5296}
5297
5298static const struct net_device_ops mvneta_netdev_ops = {
5299	.ndo_open            = mvneta_open,
5300	.ndo_stop            = mvneta_stop,
5301	.ndo_start_xmit      = mvneta_tx,
5302	.ndo_set_rx_mode     = mvneta_set_rx_mode,
5303	.ndo_set_mac_address = mvneta_set_mac_addr,
5304	.ndo_change_mtu      = mvneta_change_mtu,
5305	.ndo_fix_features    = mvneta_fix_features,
5306	.ndo_get_stats64     = mvneta_get_stats64,
5307	.ndo_eth_ioctl        = mvneta_ioctl,
5308	.ndo_bpf	     = mvneta_xdp,
5309	.ndo_xdp_xmit        = mvneta_xdp_xmit,
5310	.ndo_setup_tc	     = mvneta_setup_tc,
5311};
5312
5313static const struct ethtool_ops mvneta_eth_tool_ops = {
5314	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
5315				     ETHTOOL_COALESCE_MAX_FRAMES,
5316	.nway_reset	= mvneta_ethtool_nway_reset,
5317	.get_link       = ethtool_op_get_link,
5318	.set_coalesce   = mvneta_ethtool_set_coalesce,
5319	.get_coalesce   = mvneta_ethtool_get_coalesce,
5320	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
5321	.get_ringparam  = mvneta_ethtool_get_ringparam,
5322	.set_ringparam	= mvneta_ethtool_set_ringparam,
5323	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
5324	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
5325	.get_strings	= mvneta_ethtool_get_strings,
5326	.get_ethtool_stats = mvneta_ethtool_get_stats,
5327	.get_sset_count	= mvneta_ethtool_get_sset_count,
5328	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
5329	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
5330	.get_rxfh	= mvneta_ethtool_get_rxfh,
5331	.set_rxfh	= mvneta_ethtool_set_rxfh,
5332	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
5333	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
5334	.get_wol        = mvneta_ethtool_get_wol,
5335	.set_wol        = mvneta_ethtool_set_wol,
5336	.get_eee	= mvneta_ethtool_get_eee,
5337	.set_eee	= mvneta_ethtool_set_eee,
5338};
5339
5340/* Initialize hw */
5341static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5342{
5343	int queue;
5344
5345	/* Disable port */
5346	mvneta_port_disable(pp);
5347
5348	/* Set port default values */
5349	mvneta_defaults_set(pp);
5350
5351	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5352	if (!pp->txqs)
5353		return -ENOMEM;
5354
5355	/* Initialize TX descriptor rings */
5356	for (queue = 0; queue < txq_number; queue++) {
5357		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5358		txq->id = queue;
5359		txq->size = pp->tx_ring_size;
5360		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
5361	}
5362
5363	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5364	if (!pp->rxqs)
5365		return -ENOMEM;
5366
5367	/* Create Rx descriptor rings */
5368	for (queue = 0; queue < rxq_number; queue++) {
5369		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5370		rxq->id = queue;
5371		rxq->size = pp->rx_ring_size;
5372		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
5373		rxq->time_coal = MVNETA_RX_COAL_USEC;
5374		rxq->buf_virt_addr
5375			= devm_kmalloc_array(pp->dev->dev.parent,
5376					     rxq->size,
5377					     sizeof(*rxq->buf_virt_addr),
5378					     GFP_KERNEL);
5379		if (!rxq->buf_virt_addr)
5380			return -ENOMEM;
5381	}
5382
5383	return 0;
5384}
5385
5386/* Platform glue: initialize the MBUS address decoding windows */
5387static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5388				     const struct mbus_dram_target_info *dram)
5389{
5390	u32 win_enable;
5391	u32 win_protect;
5392	int i;
5393
5394	for (i = 0; i < 6; i++) {
5395		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5396		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5397
5398		if (i < 4)
5399			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5400	}
5401
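	/* In MVNETA_BASE_ADDR_ENABLE a set bit disables the corresponding
	 * window: start with all six windows disabled and no access
	 * protection, then clear bits as windows get configured below.
	 */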
5402	win_enable = 0x3f;
5403	win_protect = 0;
5404
5405	if (dram) {
5406		for (i = 0; i < dram->num_cs; i++) {
5407			const struct mbus_dram_window *cs = dram->cs + i;
5408
5409			mvreg_write(pp, MVNETA_WIN_BASE(i),
5410				    (cs->base & 0xffff0000) |
5411				    (cs->mbus_attr << 8) |
5412				    dram->mbus_dram_target_id);
5413
5414			mvreg_write(pp, MVNETA_WIN_SIZE(i),
5415				    (cs->size - 1) & 0xffff0000);
5416
5417			win_enable &= ~(1 << i);
5418			win_protect |= 3 << (2 * i);
5419		}
5420	} else {
5421		if (pp->neta_ac5)
5422			mvreg_write(pp, MVNETA_WIN_BASE(0),
5423				    (MVNETA_AC5_CNM_DDR_ATTR << 8) |
5424				    MVNETA_AC5_CNM_DDR_TARGET);
5425		/* For Armada3700, open the default 4GB Mbus window, leaving
5426		 * arbitration of target/attribute to a different layer
5427		 * of configuration.
5428		 */
5429		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5430		win_enable &= ~BIT(0);
5431		win_protect = 3;
5432	}
5433
5434	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5435	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5436}
5437
5438/* Power up the port */
5439static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5440{
5441	/* MAC Cause register should be cleared */
5442	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5443
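	/* Only QSGMII, SGMII, 802.3z (1000BASE-X/2500BASE-X) and RGMII
	 * interface modes are accepted here; anything else is rejected.
	 */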
5444	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
5445	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
5446	    !phy_interface_mode_is_8023z(phy_mode) &&
5447	    !phy_interface_mode_is_rgmii(phy_mode))
5448		return -EINVAL;
5449
5450	return 0;
5451}
5452
5453/* Device initialization routine */
5454static int mvneta_probe(struct platform_device *pdev)
5455{
5456	struct device_node *dn = pdev->dev.of_node;
5457	struct device_node *bm_node;
5458	struct mvneta_port *pp;
5459	struct net_device *dev;
5460	struct phylink *phylink;
5461	struct phy *comphy;
5462	char hw_mac_addr[ETH_ALEN];
5463	phy_interface_t phy_mode;
5464	const char *mac_from;
5465	int tx_csum_limit;
5466	int err;
5467	int cpu;
5468
5469	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
5470				      txq_number, rxq_number);
5471	if (!dev)
5472		return -ENOMEM;
5473
5474	dev->tx_queue_len = MVNETA_MAX_TXD;
5475	dev->watchdog_timeo = 5 * HZ;
5476	dev->netdev_ops = &mvneta_netdev_ops;
5477	dev->ethtool_ops = &mvneta_eth_tool_ops;
5478
5479	pp = netdev_priv(dev);
5480	spin_lock_init(&pp->lock);
5481	pp->dn = dn;
5482
5483	pp->rxq_def = rxq_def;
5484	pp->indir[0] = rxq_def;
5485
5486	err = of_get_phy_mode(dn, &phy_mode);
5487	if (err) {
5488		dev_err(&pdev->dev, "incorrect phy-mode\n");
5489		return err;
5490	}
5491
5492	pp->phy_interface = phy_mode;
5493
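	/* The COMPHY (common SerDes PHY) is optional: defer probing if it is
	 * not ready yet, otherwise continue without one.
	 */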
5494	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
5495	if (comphy == ERR_PTR(-EPROBE_DEFER))
5496		return -EPROBE_DEFER;
5497
5498	if (IS_ERR(comphy))
5499		comphy = NULL;
5500
5501	pp->comphy = comphy;
5502
5503	pp->base = devm_platform_ioremap_resource(pdev, 0);
5504	if (IS_ERR(pp->base))
5505		return PTR_ERR(pp->base);
5506
5507	/* Get special SoC configurations */
5508	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
5509		pp->neta_armada3700 = true;
5510	if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
5511		pp->neta_armada3700 = true;
5512		pp->neta_ac5 = true;
5513	}
5514
5515	dev->irq = irq_of_parse_and_map(dn, 0);
5516	if (dev->irq == 0)
5517		return -EINVAL;
5518
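	/* The core clock is mandatory (fall back to an unnamed clock if
	 * "core" is absent); the bus clock is optional.
	 */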
5519	pp->clk = devm_clk_get(&pdev->dev, "core");
5520	if (IS_ERR(pp->clk))
5521		pp->clk = devm_clk_get(&pdev->dev, NULL);
5522	if (IS_ERR(pp->clk)) {
5523		err = PTR_ERR(pp->clk);
5524		goto err_free_irq;
5525	}
5526
5527	clk_prepare_enable(pp->clk);
5528
5529	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5530	if (!IS_ERR(pp->clk_bus))
5531		clk_prepare_enable(pp->clk_bus);
5532
5533	pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
5534	pp->phylink_pcs.neg_mode = true;
5535
5536	pp->phylink_config.dev = &dev->dev;
5537	pp->phylink_config.type = PHYLINK_NETDEV;
5538	pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
5539		MAC_100 | MAC_1000FD | MAC_2500FD;
5540
5541	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
5542	__set_bit(PHY_INTERFACE_MODE_QSGMII,
5543		  pp->phylink_config.supported_interfaces);
5544	if (comphy) {
5545		/* If a COMPHY is present, we can support any of the serdes
5546		 * modes and switch between them.
5547		 */
5548		__set_bit(PHY_INTERFACE_MODE_SGMII,
5549			  pp->phylink_config.supported_interfaces);
5550		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
5551			  pp->phylink_config.supported_interfaces);
5552		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
5553			  pp->phylink_config.supported_interfaces);
5554	} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
5555		/* No COMPHY; only the 2500BASE-X mode is supported */
5556		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
5557			  pp->phylink_config.supported_interfaces);
5558	} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
5559		   phy_mode == PHY_INTERFACE_MODE_SGMII) {
5560		/* No COMPHY, we can switch between 1000BASE-X and SGMII */
5561		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
5562			  pp->phylink_config.supported_interfaces);
5563		__set_bit(PHY_INTERFACE_MODE_SGMII,
5564			  pp->phylink_config.supported_interfaces);
5565	}
5566
5567	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5568				 phy_mode, &mvneta_phylink_ops);
5569	if (IS_ERR(phylink)) {
5570		err = PTR_ERR(phylink);
5571		goto err_clk;
5572	}
5573
5574	pp->phylink = phylink;
5575
5576	/* Alloc per-cpu port structure */
5577	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5578	if (!pp->ports) {
5579		err = -ENOMEM;
5580		goto err_free_phylink;
5581	}
5582
5583	/* Alloc per-cpu stats */
5584	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5585	if (!pp->stats) {
5586		err = -ENOMEM;
5587		goto err_free_ports;
5588	}
5589
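	/* MAC address selection order: device tree, then the address already
	 * programmed in hardware, then a randomly generated one.
	 */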
5590	err = of_get_ethdev_address(dn, dev);
5591	if (!err) {
5592		mac_from = "device tree";
5593	} else {
5594		mvneta_get_mac_addr(pp, hw_mac_addr);
5595		if (is_valid_ether_addr(hw_mac_addr)) {
5596			mac_from = "hardware";
5597			eth_hw_addr_set(dev, hw_mac_addr);
5598		} else {
5599			mac_from = "random";
5600			eth_hw_addr_random(dev);
5601		}
5602	}
5603
5604	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
5605		if (tx_csum_limit < 0 ||
5606		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
5607			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5608			dev_info(&pdev->dev,
5609				 "Wrong TX csum limit in DT, set to %dB\n",
5610				 MVNETA_TX_CSUM_DEF_SIZE);
5611		}
5612	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
5613		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5614	} else {
5615		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
5616	}
5617
5618	pp->tx_csum_limit = tx_csum_limit;
5619
5620	pp->dram_target_info = mv_mbus_dram_info();
5621	/* Armada3700 requires setting the default Mbus window
5622	 * configuration, but without using a filled mbus_dram_target_info
5623	 * structure.
5624	 */
5625	if (pp->dram_target_info || pp->neta_armada3700)
5626		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5627
5628	pp->tx_ring_size = MVNETA_MAX_TXD;
5629	pp->rx_ring_size = MVNETA_MAX_RXD;
5630
5631	pp->dev = dev;
5632	SET_NETDEV_DEV(dev, &pdev->dev);
5633
5634	pp->id = global_port_id++;
5635
5636	/* Obtain access to BM resources if enabled and already initialized */
5637	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
5638	if (bm_node) {
5639		pp->bm_priv = mvneta_bm_get(bm_node);
5640		if (pp->bm_priv) {
5641			err = mvneta_bm_port_init(pdev, pp);
5642			if (err < 0) {
5643				dev_info(&pdev->dev,
5644					 "use SW buffer management\n");
5645				mvneta_bm_put(pp->bm_priv);
5646				pp->bm_priv = NULL;
5647			}
5648		}
5649		/* Set RX packet offset correction for platforms whose
5650		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
5651		 * platforms and 0B for 32-bit ones.
5652		 */
5653		pp->rx_offset_correction = max(0,
5654					       NET_SKB_PAD -
5655					       MVNETA_RX_PKT_OFFSET_CORRECTION);
5656	}
5657	of_node_put(bm_node);
5658
5659	/* sw buffer management */
5660	if (!pp->bm_priv)
5661		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5662
5663	err = mvneta_init(&pdev->dev, pp);
5664	if (err < 0)
5665		goto err_netdev;
5666
5667	err = mvneta_port_power_up(pp, pp->phy_interface);
5668	if (err < 0) {
5669		dev_err(&pdev->dev, "can't power up port\n");
5670		goto err_netdev;
5671	}
5672
5673	/* The Armada3700 network controller does not support per-CPU
5674	 * operation, so only a single NAPI instance is initialized.
5675	 */
5676	if (pp->neta_armada3700) {
5677		netif_napi_add(dev, &pp->napi, mvneta_poll);
5678	} else {
5679		for_each_present_cpu(cpu) {
5680			struct mvneta_pcpu_port *port =
5681				per_cpu_ptr(pp->ports, cpu);
5682
5683			netif_napi_add(dev, &port->napi, mvneta_poll);
5684			port->pp = pp;
5685		}
5686	}
5687
5688	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5689			NETIF_F_TSO | NETIF_F_RXCSUM;
5690	dev->hw_features |= dev->features;
5691	dev->vlan_features |= dev->features;
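	/* XDP features are only advertised with software buffer management;
	 * XDP is not supported together with the hardware buffer manager.
	 */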
5692	if (!pp->bm_priv)
5693		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
5694				    NETDEV_XDP_ACT_REDIRECT |
5695				    NETDEV_XDP_ACT_NDO_XMIT |
5696				    NETDEV_XDP_ACT_RX_SG |
5697				    NETDEV_XDP_ACT_NDO_XMIT_SG;
5698	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5699	netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
5700
5701	/* MTU range: 68 - 9676 */
5702	dev->min_mtu = ETH_MIN_MTU;
5703	/* 9676 == 9700 - 20 and rounding to 8 */
5704	dev->max_mtu = 9676;
5705
5706	err = register_netdev(dev);
5707	if (err < 0) {
5708		dev_err(&pdev->dev, "failed to register\n");
5709		goto err_netdev;
5710	}
5711
5712	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5713		    dev->dev_addr);
5714
5715	platform_set_drvdata(pdev, pp->dev);
5716
5717	return 0;
5718
5719err_netdev:
5720	if (pp->bm_priv) {
5721		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5722		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5723				       1 << pp->id);
5724		mvneta_bm_put(pp->bm_priv);
5725	}
5726	free_percpu(pp->stats);
5727err_free_ports:
5728	free_percpu(pp->ports);
5729err_free_phylink:
5730	if (pp->phylink)
5731		phylink_destroy(pp->phylink);
5732err_clk:
5733	clk_disable_unprepare(pp->clk_bus);
5734	clk_disable_unprepare(pp->clk);
5735err_free_irq:
5736	irq_dispose_mapping(dev->irq);
5737	return err;
5738}
5739
5740/* Device removal routine */
5741static void mvneta_remove(struct platform_device *pdev)
5742{
5743	struct net_device  *dev = platform_get_drvdata(pdev);
5744	struct mvneta_port *pp = netdev_priv(dev);
5745
5746	unregister_netdev(dev);
5747	clk_disable_unprepare(pp->clk_bus);
5748	clk_disable_unprepare(pp->clk);
5749	free_percpu(pp->ports);
5750	free_percpu(pp->stats);
5751	irq_dispose_mapping(dev->irq);
5752	phylink_destroy(pp->phylink);
5753
5754	if (pp->bm_priv) {
5755		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5756		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5757				       1 << pp->id);
5758		mvneta_bm_put(pp->bm_priv);
5759	}
5760}
5761
5762#ifdef CONFIG_PM_SLEEP
5763static int mvneta_suspend(struct device *device)
5764{
5765	int queue;
5766	struct net_device *dev = dev_get_drvdata(device);
5767	struct mvneta_port *pp = netdev_priv(dev);
5768
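	/* If the interface is down there is nothing to quiesce: just detach
	 * the net_device and gate the clocks.
	 */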
5769	if (!netif_running(dev))
5770		goto clean_exit;
5771
5772	if (!pp->neta_armada3700) {
5773		spin_lock(&pp->lock);
5774		pp->is_stopped = true;
5775		spin_unlock(&pp->lock);
5776
5777		cpuhp_state_remove_instance_nocalls(online_hpstate,
5778						    &pp->node_online);
5779		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5780						    &pp->node_dead);
5781	}
5782
5783	rtnl_lock();
5784	mvneta_stop_dev(pp);
5785	rtnl_unlock();
5786
5787	for (queue = 0; queue < rxq_number; queue++) {
5788		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5789
5790		mvneta_rxq_drop_pkts(pp, rxq);
5791	}
5792
5793	for (queue = 0; queue < txq_number; queue++) {
5794		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5795
5796		mvneta_txq_hw_deinit(pp, txq);
5797	}
5798
5799clean_exit:
5800	netif_device_detach(dev);
5801	clk_disable_unprepare(pp->clk_bus);
5802	clk_disable_unprepare(pp->clk);
5803
5804	return 0;
5805}
5806
5807static int mvneta_resume(struct device *device)
5808{
5809	struct platform_device *pdev = to_platform_device(device);
5810	struct net_device *dev = dev_get_drvdata(device);
5811	struct mvneta_port *pp = netdev_priv(dev);
5812	int err, queue;
5813
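	/* Re-enable the clocks and redo the Mbus window and buffer manager
	 * setup done at probe time.
	 */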
5814	clk_prepare_enable(pp->clk);
5815	if (!IS_ERR(pp->clk_bus))
5816		clk_prepare_enable(pp->clk_bus);
5817	if (pp->dram_target_info || pp->neta_armada3700)
5818		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5819	if (pp->bm_priv) {
5820		err = mvneta_bm_port_init(pdev, pp);
5821		if (err < 0) {
5822			dev_info(&pdev->dev, "use SW buffer management\n");
5823			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5824			pp->bm_priv = NULL;
5825		}
5826	}
5827	mvneta_defaults_set(pp);
5828	err = mvneta_port_power_up(pp, pp->phy_interface);
5829	if (err < 0) {
5830		dev_err(device, "can't power up port\n");
5831		return err;
5832	}
5833
5834	netif_device_attach(dev);
5835
5836	if (!netif_running(dev))
5837		return 0;
5838
5839	for (queue = 0; queue < rxq_number; queue++) {
5840		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5841
5842		rxq->next_desc_to_proc = 0;
5843		mvneta_rxq_hw_init(pp, rxq);
5844	}
5845
5846	for (queue = 0; queue < txq_number; queue++) {
5847		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5848
5849		txq->next_desc_to_proc = 0;
5850		mvneta_txq_hw_init(pp, txq);
5851	}
5852
5853	if (!pp->neta_armada3700) {
5854		spin_lock(&pp->lock);
5855		pp->is_stopped = false;
5856		spin_unlock(&pp->lock);
5857		cpuhp_state_add_instance_nocalls(online_hpstate,
5858						 &pp->node_online);
5859		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5860						 &pp->node_dead);
5861	}
5862
5863	rtnl_lock();
5864	mvneta_start_dev(pp);
5865	rtnl_unlock();
5866	mvneta_set_rx_mode(dev);
5867
5868	return 0;
5869}
5870#endif
5871
5872static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
5873
5874static const struct of_device_id mvneta_match[] = {
5875	{ .compatible = "marvell,armada-370-neta" },
5876	{ .compatible = "marvell,armada-xp-neta" },
5877	{ .compatible = "marvell,armada-3700-neta" },
5878	{ .compatible = "marvell,armada-ac5-neta" },
5879	{ }
5880};
5881MODULE_DEVICE_TABLE(of, mvneta_match);
5882
5883static struct platform_driver mvneta_driver = {
5884	.probe = mvneta_probe,
5885	.remove_new = mvneta_remove,
5886	.driver = {
5887		.name = MVNETA_DRIVER_NAME,
5888		.of_match_table = mvneta_match,
5889		.pm = &mvneta_pm_ops,
5890	},
5891};
5892
5893static int __init mvneta_driver_init(void)
5894{
5895	int ret;
5896
5897	BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);
5898
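	/* Register the CPU hotplug callbacks before the platform driver so
	 * that probed ports can add their per-CPU instances to these states.
	 */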
5899	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
5900				      mvneta_cpu_online,
5901				      mvneta_cpu_down_prepare);
5902	if (ret < 0)
5903		goto out;
5904	online_hpstate = ret;
5905	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
5906				      NULL, mvneta_cpu_dead);
5907	if (ret)
5908		goto err_dead;
5909
5910	ret = platform_driver_register(&mvneta_driver);
5911	if (ret)
5912		goto err;
5913	return 0;
5914
5915err:
5916	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5917err_dead:
5918	cpuhp_remove_multi_state(online_hpstate);
5919out:
5920	return ret;
5921}
5922module_init(mvneta_driver_init);
5923
5924static void __exit mvneta_driver_exit(void)
5925{
5926	platform_driver_unregister(&mvneta_driver);
5927	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5928	cpuhp_remove_multi_state(online_hpstate);
5929}
5930module_exit(mvneta_driver_exit);
5931
5932MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5933MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
5934MODULE_LICENSE("GPL");
5935
5936module_param(rxq_number, int, 0444);
5937module_param(txq_number, int, 0444);
5938
5939module_param(rxq_def, int, 0444);
5940module_param(rx_copybreak, int, 0644);
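/* Illustrative (hypothetical) usage when mvneta is built as a module:
 * the queue counts and default RX queue can only be set at load time
 * (0444), while rx_copybreak is also writable at runtime via sysfs:
 *
 *   modprobe mvneta rxq_number=4 txq_number=4 rxq_def=0
 *   echo 128 > /sys/module/mvneta/parameters/rx_copybreak
 */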
  76#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
  77#define MVNETA_PORT_CONFIG                      0x2400
  78#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  79#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  80#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  81#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  82#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  83#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  84#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  85#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
  86#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
  87						 MVNETA_DEF_RXQ_ARP(q)	 | \
  88						 MVNETA_DEF_RXQ_TCP(q)	 | \
  89						 MVNETA_DEF_RXQ_UDP(q)	 | \
  90						 MVNETA_DEF_RXQ_BPDU(q)	 | \
  91						 MVNETA_TX_UNSET_ERR_SUM | \
  92						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  93#define MVNETA_PORT_CONFIG_EXTEND                0x2404
  94#define MVNETA_MAC_ADDR_LOW                      0x2414
  95#define MVNETA_MAC_ADDR_HIGH                     0x2418
  96#define MVNETA_SDMA_CONFIG                       0x241c
  97#define      MVNETA_SDMA_BRST_SIZE_16            4
  98#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
  99#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 100#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 101#define      MVNETA_DESC_SWAP                    BIT(6)
 102#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 
 
 103#define MVNETA_PORT_STATUS                       0x2444
 104#define      MVNETA_TX_IN_PRGRS                  BIT(1)
 105#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 106#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 
 107#define MVNETA_SERDES_CFG			 0x24A0
 108#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
 109#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
 
 110#define MVNETA_TYPE_PRIO                         0x24bc
 111#define      MVNETA_FORCE_UNI                    BIT(21)
 112#define MVNETA_TXQ_CMD_1                         0x24e4
 113#define MVNETA_TXQ_CMD                           0x2448
 114#define      MVNETA_TXQ_DISABLE_SHIFT            8
 115#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 116#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
 117#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
 118#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 119#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 120#define MVNETA_ACC_MODE                          0x2500
 121#define MVNETA_BM_ADDRESS                        0x2504
 122#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 123#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 124#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 125#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
 126#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
 127#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 128
 129/* Exception Interrupt Port/Queue Cause register
 130 *
 131 * Their behavior depend of the mapping done using the PCPX2Q
 132 * registers. For a given CPU if the bit associated to a queue is not
 133 * set, then for the register a read from this CPU will always return
 134 * 0 and a write won't do anything
 135 */
 136
 137#define MVNETA_INTR_NEW_CAUSE                    0x25a0
 138#define MVNETA_INTR_NEW_MASK                     0x25a4
 139
 140/* bits  0..7  = TXQ SENT, one bit per queue.
 141 * bits  8..15 = RXQ OCCUP, one bit per queue.
 142 * bits 16..23 = RXQ FREE, one bit per queue.
 143 * bit  29 = OLD_REG_SUM, see old reg ?
 144 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 145 * bit  31 = MISC_SUM,   one bit for 4 ports
 146 */
 147#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 148#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 149#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 150#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 151#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
 152
 153#define MVNETA_INTR_OLD_CAUSE                    0x25a8
 154#define MVNETA_INTR_OLD_MASK                     0x25ac
 155
 156/* Data Path Port/Queue Cause Register */
 157#define MVNETA_INTR_MISC_CAUSE                   0x25b0
 158#define MVNETA_INTR_MISC_MASK                    0x25b4
 159
 160#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
 161#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
 162#define      MVNETA_CAUSE_PTP                    BIT(4)
 163
 164#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
 165#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
 166#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
 167#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
 168#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
 169#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
 170#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
 171#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
 172
 173#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
 174#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
 175#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
 176
 177#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
 178#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
 179#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
 180
 181#define MVNETA_INTR_ENABLE                       0x25b8
 182#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
 183#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 184
 185#define MVNETA_RXQ_CMD                           0x2680
 186#define      MVNETA_RXQ_DISABLE_SHIFT            8
 187#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 188#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 189#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 190#define MVNETA_GMAC_CTRL_0                       0x2c00
 191#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 192#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 193#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 194#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 195#define MVNETA_GMAC_CTRL_2                       0x2c08
 196#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 197#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 198#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 199#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 200#define MVNETA_GMAC_STATUS                       0x2c10
 201#define      MVNETA_GMAC_LINK_UP                 BIT(0)
 202#define      MVNETA_GMAC_SPEED_1000              BIT(1)
 203#define      MVNETA_GMAC_SPEED_100               BIT(2)
 204#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 205#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 206#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 207#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 208#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 209#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
 210#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 211#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 212#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 213#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 214#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 215#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
 216#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 217#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 218#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 219#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 220#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
 221#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 222#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 223#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 224#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 225#define MVNETA_GMAC_CTRL_4                       0x2c90
 226#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
 227#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 228#define      MVNETA_MIB_LATE_COLLISION           0x7c
 229#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 230#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 231#define MVNETA_DA_FILT_UCAST_BASE                0x3600
 232#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 233#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 234#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 235#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 236#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 237#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 238#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 239#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 240#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 241#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 242#define MVNETA_PORT_TX_RESET                     0x3cf0
 243#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 
 
 
 
 
 244#define MVNETA_TX_MTU                            0x3e0c
 245#define MVNETA_TX_TOKEN_SIZE                     0x3e14
 246#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 
 
 
 
 247#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 248#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 249
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 250#define MVNETA_LPI_CTRL_0                        0x2cc0
 251#define MVNETA_LPI_CTRL_1                        0x2cc4
 252#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
 253#define MVNETA_LPI_CTRL_2                        0x2cc8
 254#define MVNETA_LPI_STATUS                        0x2ccc
 255
 256#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
 257
 258/* Descriptor ring Macros */
 259#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
 260	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
 261
 262/* Various constants */
 263
 264/* Coalescing */
 265#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 266#define MVNETA_RX_COAL_PKTS		32
 267#define MVNETA_RX_COAL_USEC		100
 268
 269/* The two bytes Marvell header. Either contains a special value used
 270 * by Marvell switches when a specific hardware mode is enabled (not
 271 * supported by this driver) or is filled automatically by zeroes on
 272 * the RX side. Those two bytes being at the front of the Ethernet
 273 * header, they allow to have the IP header aligned on a 4 bytes
 274 * boundary automatically: the hardware skips those two bytes on its
 275 * own.
 276 */
 277#define MVNETA_MH_SIZE			2
 278
 279#define MVNETA_VLAN_TAG_LEN             4
 280
 281#define MVNETA_TX_CSUM_DEF_SIZE		1600
 282#define MVNETA_TX_CSUM_MAX_SIZE		9800
 283#define MVNETA_ACC_MODE_EXT1		1
 284#define MVNETA_ACC_MODE_EXT2		2
 285
 286#define MVNETA_MAX_DECODE_WIN		6
 287
 288/* Timeout constants */
 289#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
 290#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
 291#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
 292
 293#define MVNETA_TX_MTU_MAX		0x3ffff
 294
 295/* The RSS lookup table actually has 256 entries but we do not use
 296 * them yet
 297 */
 298#define MVNETA_RSS_LU_TABLE_SIZE	1
 299
 300/* Max number of Rx descriptors */
 301#define MVNETA_MAX_RXD 512
 302
 303/* Max number of Tx descriptors */
 304#define MVNETA_MAX_TXD 1024
 305
 306/* Max number of allowed TCP segments for software TSO */
 307#define MVNETA_MAX_TSO_SEGS 100
 308
 309#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 310
 
 
 
 
 
 
 
 
 
 311/* descriptor aligned size */
 312#define MVNETA_DESC_ALIGNED_SIZE	32
 313
 314/* Number of bytes to be taken into account by HW when putting incoming data
 315 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 316 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 317 */
 318#define MVNETA_RX_PKT_OFFSET_CORRECTION		64
 319
 320#define MVNETA_RX_PKT_SIZE(mtu) \
 321	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 322	      ETH_HLEN + ETH_FCS_LEN,			     \
 323	      cache_line_size())
 324
 325#define IS_TSO_HEADER(txq, addr) \
 326	((addr >= txq->tso_hdrs_phys) && \
 327	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
 
 
 328
 329#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 330	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 331
 332enum {
 333	ETHTOOL_STAT_EEE_WAKEUP,
 334	ETHTOOL_STAT_SKB_ALLOC_ERR,
 335	ETHTOOL_STAT_REFILL_ERR,
 
 
 
 
 
 
 
 336	ETHTOOL_MAX_STATS,
 337};
 338
 339struct mvneta_statistic {
 340	unsigned short offset;
 341	unsigned short type;
 342	const char name[ETH_GSTRING_LEN];
 343};
 344
 345#define T_REG_32	32
 346#define T_REG_64	64
 347#define T_SW		1
 348
 
 
 
 
 
 349static const struct mvneta_statistic mvneta_statistics[] = {
 350	{ 0x3000, T_REG_64, "good_octets_received", },
 351	{ 0x3010, T_REG_32, "good_frames_received", },
 352	{ 0x3008, T_REG_32, "bad_octets_received", },
 353	{ 0x3014, T_REG_32, "bad_frames_received", },
 354	{ 0x3018, T_REG_32, "broadcast_frames_received", },
 355	{ 0x301c, T_REG_32, "multicast_frames_received", },
 356	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
 357	{ 0x3058, T_REG_32, "good_fc_received", },
 358	{ 0x305c, T_REG_32, "bad_fc_received", },
 359	{ 0x3060, T_REG_32, "undersize_received", },
 360	{ 0x3064, T_REG_32, "fragments_received", },
 361	{ 0x3068, T_REG_32, "oversize_received", },
 362	{ 0x306c, T_REG_32, "jabber_received", },
 363	{ 0x3070, T_REG_32, "mac_receive_error", },
 364	{ 0x3074, T_REG_32, "bad_crc_event", },
 365	{ 0x3078, T_REG_32, "collision", },
 366	{ 0x307c, T_REG_32, "late_collision", },
 367	{ 0x2484, T_REG_32, "rx_discard", },
 368	{ 0x2488, T_REG_32, "rx_overrun", },
 369	{ 0x3020, T_REG_32, "frames_64_octets", },
 370	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
 371	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
 372	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
 373	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
 374	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
 375	{ 0x3038, T_REG_64, "good_octets_sent", },
 376	{ 0x3040, T_REG_32, "good_frames_sent", },
 377	{ 0x3044, T_REG_32, "excessive_collision", },
 378	{ 0x3048, T_REG_32, "multicast_frames_sent", },
 379	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
 380	{ 0x3054, T_REG_32, "fc_sent", },
 381	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
 382	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 383	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 384	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 
 
 
 
 
 
 
 385};
 386
 387struct mvneta_pcpu_stats {
 388	struct	u64_stats_sync syncp;
 389	u64	rx_packets;
 390	u64	rx_bytes;
 391	u64	tx_packets;
 392	u64	tx_bytes;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 393};
 394
 395struct mvneta_pcpu_port {
 396	/* Pointer to the shared port */
 397	struct mvneta_port	*pp;
 398
 399	/* Pointer to the CPU-local NAPI struct */
 400	struct napi_struct	napi;
 401
 402	/* Cause of the previous interrupt */
 403	u32			cause_rx_tx;
 404};
 405
 
 
 
 
 406struct mvneta_port {
 407	u8 id;
 408	struct mvneta_pcpu_port __percpu	*ports;
 409	struct mvneta_pcpu_stats __percpu	*stats;
 410
 
 
 411	int pkt_size;
 412	void __iomem *base;
 413	struct mvneta_rx_queue *rxqs;
 414	struct mvneta_tx_queue *txqs;
 415	struct net_device *dev;
 416	struct hlist_node node_online;
 417	struct hlist_node node_dead;
 418	int rxq_def;
 419	/* Protect the access to the percpu interrupt registers,
 420	 * ensuring that the configuration remains coherent.
 421	 */
 422	spinlock_t lock;
 423	bool is_stopped;
 424
 425	u32 cause_rx_tx;
 426	struct napi_struct napi;
 427
 
 
 428	/* Core clock */
 429	struct clk *clk;
 430	/* AXI clock */
 431	struct clk *clk_bus;
 432	u8 mcast_count[256];
 433	u16 tx_ring_size;
 434	u16 rx_ring_size;
 435
 436	phy_interface_t phy_interface;
 437	struct device_node *dn;
 438	unsigned int tx_csum_limit;
 439	struct phylink *phylink;
 440	struct phylink_config phylink_config;
 
 441	struct phy *comphy;
 442
 443	struct mvneta_bm *bm_priv;
 444	struct mvneta_bm_pool *pool_long;
 445	struct mvneta_bm_pool *pool_short;
 446	int bm_win_id;
 447
 448	bool eee_enabled;
 449	bool eee_active;
 450	bool tx_lpi_enabled;
 451
 452	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 453
 454	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 455
 456	/* Flags for special SoC configurations */
 457	bool neta_armada3700;
 
 458	u16 rx_offset_correction;
 459	const struct mbus_dram_target_info *dram_target_info;
 460};
 461
 462/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 463 * layout of the transmit and reception DMA descriptors, and their
 464 * layout is therefore defined by the hardware design
 465 */
 466
 467#define MVNETA_TX_L3_OFF_SHIFT	0
 468#define MVNETA_TX_IP_HLEN_SHIFT	8
 469#define MVNETA_TX_L4_UDP	BIT(16)
 470#define MVNETA_TX_L3_IP6	BIT(17)
 471#define MVNETA_TXD_IP_CSUM	BIT(18)
 472#define MVNETA_TXD_Z_PAD	BIT(19)
 473#define MVNETA_TXD_L_DESC	BIT(20)
 474#define MVNETA_TXD_F_DESC	BIT(21)
 475#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
 476				 MVNETA_TXD_L_DESC | \
 477				 MVNETA_TXD_F_DESC)
 478#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
 479#define MVNETA_TX_L4_CSUM_NOT	BIT(31)
 480
 481#define MVNETA_RXD_ERR_CRC		0x0
 482#define MVNETA_RXD_BM_POOL_SHIFT	13
 483#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
 484#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
 485#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
 486#define MVNETA_RXD_ERR_LEN		BIT(18)
 487#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
 488#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
 489#define MVNETA_RXD_L3_IP4		BIT(25)
 490#define MVNETA_RXD_LAST_DESC		BIT(26)
 491#define MVNETA_RXD_FIRST_DESC		BIT(27)
 492#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
 493					 MVNETA_RXD_LAST_DESC)
 494#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
 495
 496#if defined(__LITTLE_ENDIAN)
 497struct mvneta_tx_desc {
 498	u32  command;		/* Options used by HW for packet transmitting.*/
 499	u16  reserved1;		/* csum_l4 (for future use)		*/
 500	u16  data_size;		/* Data size of transmitted packet in bytes */
 501	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
 502	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
 503	u32  reserved3[4];	/* Reserved - (for future use)		*/
 504};
 505
 506struct mvneta_rx_desc {
 507	u32  status;		/* Info about received packet		*/
 508	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
 509	u16  data_size;		/* Size of received packet in bytes	*/
 510
 511	u32  buf_phys_addr;	/* Physical address of the buffer	*/
 512	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
 513
 514	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
 515	u16  reserved3;		/* prefetch_cmd, for future use		*/
 516	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
 517
 518	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
 519	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
 520};
 521#else
 522struct mvneta_tx_desc {
 523	u16  data_size;		/* Data size of transmitted packet in bytes */
 524	u16  reserved1;		/* csum_l4 (for future use)		*/
 525	u32  command;		/* Options used by HW for packet transmitting.*/
 526	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
 527	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
 528	u32  reserved3[4];	/* Reserved - (for future use)		*/
 529};
 530
 531struct mvneta_rx_desc {
 532	u16  data_size;		/* Size of received packet in bytes	*/
 533	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
 534	u32  status;		/* Info about received packet		*/
 535
 536	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
 537	u32  buf_phys_addr;	/* Physical address of the buffer	*/
 538
 539	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
 540	u16  reserved3;		/* prefetch_cmd, for future use		*/
 541	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
 542
 543	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
 544	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
 545};
 546#endif
 547
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 548struct mvneta_tx_queue {
 549	/* Number of this TX queue, in the range 0-7 */
 550	u8 id;
 551
 552	/* Number of TX DMA descriptors in the descriptor ring */
 553	int size;
 554
 555	/* Number of currently used TX DMA descriptor in the
 556	 * descriptor ring
 557	 */
 558	int count;
 559	int pending;
 560	int tx_stop_threshold;
 561	int tx_wake_threshold;
 562
 563	/* Array of transmitted skb */
 564	struct sk_buff **tx_skb;
 565
 566	/* Index of last TX DMA descriptor that was inserted */
 567	int txq_put_index;
 568
 569	/* Index of the TX DMA descriptor to be cleaned up */
 570	int txq_get_index;
 571
 572	u32 done_pkts_coal;
 573
 574	/* Virtual address of the TX DMA descriptors array */
 575	struct mvneta_tx_desc *descs;
 576
 577	/* DMA address of the TX DMA descriptors array */
 578	dma_addr_t descs_phys;
 579
 580	/* Index of the last TX DMA descriptor */
 581	int last_desc;
 582
 583	/* Index of the next TX DMA descriptor to process */
 584	int next_desc_to_proc;
 585
 586	/* DMA buffers for TSO headers */
 587	char *tso_hdrs;
 588
 589	/* DMA address of TSO headers */
 590	dma_addr_t tso_hdrs_phys;
 591
 592	/* Affinity mask for CPUs*/
 593	cpumask_t affinity_mask;
 594};
 595
 596struct mvneta_rx_queue {
 597	/* rx queue number, in the range 0-7 */
 598	u8 id;
 599
 600	/* num of rx descriptors in the rx descriptor ring */
 601	int size;
 602
 603	u32 pkts_coal;
 604	u32 time_coal;
 605
 
 
 
 
 606	/* Virtual address of the RX buffer */
 607	void  **buf_virt_addr;
 608
 609	/* Virtual address of the RX DMA descriptors array */
 610	struct mvneta_rx_desc *descs;
 611
 612	/* DMA address of the RX DMA descriptors array */
 613	dma_addr_t descs_phys;
 614
 615	/* Index of the last RX DMA descriptor */
 616	int last_desc;
 617
 618	/* Index of the next RX DMA descriptor to process */
 619	int next_desc_to_proc;
 620
 621	/* Index of first RX DMA descriptor to refill */
 622	int first_to_refill;
 623	u32 refill_num;
 624
 625	/* pointer to uncomplete skb buffer */
 626	struct sk_buff *skb;
 627	int left_size;
 628
 629	/* error counters */
 630	u32 skb_alloc_err;
 631	u32 refill_err;
 632};
 633
 634static enum cpuhp_state online_hpstate;
 635/* The hardware supports eight (8) rx queues, but we are only allowing
 636 * the first one to be used. Therefore, let's just allocate one queue.
 637 */
 638static int rxq_number = 8;
 639static int txq_number = 8;
 640
 641static int rxq_def;
 642
 643static int rx_copybreak __read_mostly = 256;
 644static int rx_header_size __read_mostly = 128;
 645
 646/* HW BM need that each port be identify by a unique ID */
 647static int global_port_id;
 648
 649#define MVNETA_DRIVER_NAME "mvneta"
 650#define MVNETA_DRIVER_VERSION "1.0"
 651
 652/* Utility/helper methods */
 653
 654/* Write helper method */
 655static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
 656{
 657	writel(data, pp->base + offset);
 658}
 659
 660/* Read helper method */
 661static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
 662{
 663	return readl(pp->base + offset);
 664}
 665
 666/* Increment txq get counter */
 667static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
 668{
 669	txq->txq_get_index++;
 670	if (txq->txq_get_index == txq->size)
 671		txq->txq_get_index = 0;
 672}
 673
 674/* Increment txq put counter */
 675static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
 676{
 677	txq->txq_put_index++;
 678	if (txq->txq_put_index == txq->size)
 679		txq->txq_put_index = 0;
 680}
 681
 682
 683/* Clear all MIB counters */
 684static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 685{
 686	int i;
 687	u32 dummy;
 688
 689	/* Perform dummy reads from MIB counters */
 690	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 691		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 692	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
 693	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 694}
 695
 696/* Get System Network Statistics */
 697static void
 698mvneta_get_stats64(struct net_device *dev,
 699		   struct rtnl_link_stats64 *stats)
 700{
 701	struct mvneta_port *pp = netdev_priv(dev);
 702	unsigned int start;
 703	int cpu;
 704
 705	for_each_possible_cpu(cpu) {
 706		struct mvneta_pcpu_stats *cpu_stats;
 707		u64 rx_packets;
 708		u64 rx_bytes;
 
 
 709		u64 tx_packets;
 710		u64 tx_bytes;
 711
 712		cpu_stats = per_cpu_ptr(pp->stats, cpu);
 713		do {
 714			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 715			rx_packets = cpu_stats->rx_packets;
 716			rx_bytes   = cpu_stats->rx_bytes;
 717			tx_packets = cpu_stats->tx_packets;
 718			tx_bytes   = cpu_stats->tx_bytes;
 719		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 
 720
 721		stats->rx_packets += rx_packets;
 722		stats->rx_bytes   += rx_bytes;
 
 
 723		stats->tx_packets += tx_packets;
 724		stats->tx_bytes   += tx_bytes;
 725	}
 726
 727	stats->rx_errors	= dev->stats.rx_errors;
 728	stats->rx_dropped	= dev->stats.rx_dropped;
 729
 730	stats->tx_dropped	= dev->stats.tx_dropped;
 731}
 732
 733/* Rx descriptors helper methods */
 734
 735/* Checks whether the RX descriptor having this status is both the first
 736 * and the last descriptor for the RX packet. Each RX packet is currently
 737 * received through a single RX descriptor, so not having each RX
 738 * descriptor with its first and last bits set is an error
 739 */
 740static int mvneta_rxq_desc_is_first_last(u32 status)
 741{
 742	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 743		MVNETA_RXD_FIRST_LAST_DESC;
 744}
 745
 746/* Add number of descriptors ready to receive new packets */
 747static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 748					  struct mvneta_rx_queue *rxq,
 749					  int ndescs)
 750{
 751	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 752	 * be added at once
 753	 */
 754	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 755		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 756			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 757			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 758		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 759	}
 760
 761	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 762		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 763}
 764
 765/* Get number of RX descriptors occupied by received packets */
 766static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 767					struct mvneta_rx_queue *rxq)
 768{
 769	u32 val;
 770
 771	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 772	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 773}
 774
 775/* Update num of rx desc called upon return from rx path or
 776 * from mvneta_rxq_drop_pkts().
 777 */
 778static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 779				       struct mvneta_rx_queue *rxq,
 780				       int rx_done, int rx_filled)
 781{
 782	u32 val;
 783
 784	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 785		val = rx_done |
 786		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 787		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 788		return;
 789	}
 790
 791	/* Only 255 descriptors can be added at once */
 792	while ((rx_done > 0) || (rx_filled > 0)) {
 793		if (rx_done <= 0xff) {
 794			val = rx_done;
 795			rx_done = 0;
 796		} else {
 797			val = 0xff;
 798			rx_done -= 0xff;
 799		}
 800		if (rx_filled <= 0xff) {
 801			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 802			rx_filled = 0;
 803		} else {
 804			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 805			rx_filled -= 0xff;
 806		}
 807		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 808	}
 809}
 810
 811/* Get pointer to next RX descriptor to be processed by SW */
 812static struct mvneta_rx_desc *
 813mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 814{
 815	int rx_desc = rxq->next_desc_to_proc;
 816
 817	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 818	prefetch(rxq->descs + rxq->next_desc_to_proc);
 819	return rxq->descs + rx_desc;
 820}
 821
 822/* Change maximum receive size of the port. */
 823static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
 824{
 825	u32 val;
 826
 827	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 828	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
 829	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
 830		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
 831	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 832}
 833
 834
 835/* Set rx queue offset */
 836static void mvneta_rxq_offset_set(struct mvneta_port *pp,
 837				  struct mvneta_rx_queue *rxq,
 838				  int offset)
 839{
 840	u32 val;
 841
 842	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 843	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
 844
 845	/* Offset is in */
 846	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
 847	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 848}
 849
 850
 851/* Tx descriptors helper methods */
 852
 853/* Update HW with number of TX descriptors to be sent */
 854static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 855				     struct mvneta_tx_queue *txq,
 856				     int pend_desc)
 857{
 858	u32 val;
 859
 860	pend_desc += txq->pending;
 861
 862	/* Only 255 Tx descriptors can be added at once */
 863	do {
 864		val = min(pend_desc, 255);
 865		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 866		pend_desc -= val;
 867	} while (pend_desc > 0);
 868	txq->pending = 0;
 869}
 870
 871/* Get pointer to next TX descriptor to be processed (send) by HW */
 872static struct mvneta_tx_desc *
 873mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 874{
 875	int tx_desc = txq->next_desc_to_proc;
 876
 877	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 878	return txq->descs + tx_desc;
 879}
 880
 881/* Release the last allocated TX descriptor. Useful to handle DMA
 882 * mapping failures in the TX path.
 883 */
 884static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 885{
 886	if (txq->next_desc_to_proc == 0)
 887		txq->next_desc_to_proc = txq->last_desc - 1;
 888	else
 889		txq->next_desc_to_proc--;
 890}
 891
 892/* Set rxq buf size */
 893static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
 894				    struct mvneta_rx_queue *rxq,
 895				    int buf_size)
 896{
 897	u32 val;
 898
 899	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
 900
 901	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
 902	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
 903
 904	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 905}
 906
 907/* Disable buffer management (BM) */
 908static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 909				  struct mvneta_rx_queue *rxq)
 910{
 911	u32 val;
 912
 913	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 914	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
 915	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 916}
 917
 918/* Enable buffer management (BM) */
 919static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
 920				 struct mvneta_rx_queue *rxq)
 921{
 922	u32 val;
 923
 924	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 925	val |= MVNETA_RXQ_HW_BUF_ALLOC;
 926	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 927}
 928
 929/* Notify HW about port's assignment of pool for bigger packets */
 930static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
 931				     struct mvneta_rx_queue *rxq)
 932{
 933	u32 val;
 934
 935	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 936	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
 937	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
 938
 939	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 940}
 941
 942/* Notify HW about port's assignment of pool for smaller packets */
 943static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
 944				      struct mvneta_rx_queue *rxq)
 945{
 946	u32 val;
 947
 948	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 949	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
 950	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
 951
 952	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 953}
 954
 955/* Set port's receive buffer size for assigned BM pool */
 956static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
 957					      int buf_size,
 958					      u8 pool_id)
 959{
 960	u32 val;
 961
 962	if (!IS_ALIGNED(buf_size, 8)) {
 963		dev_warn(pp->dev->dev.parent,
 964			 "illegal buf_size value %d, round to %d\n",
 965			 buf_size, ALIGN(buf_size, 8));
 966		buf_size = ALIGN(buf_size, 8);
 967	}
 968
 969	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
 970	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
 971	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
 972}
 973
 974/* Configure MBUS window in order to enable access BM internal SRAM */
 975static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
 976				  u8 target, u8 attr)
 977{
 978	u32 win_enable, win_protect;
 979	int i;
 980
 981	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
 982
 983	if (pp->bm_win_id < 0) {
 984		/* Find first not occupied window */
 985		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
 986			if (win_enable & (1 << i)) {
 987				pp->bm_win_id = i;
 988				break;
 989			}
 990		}
 991		if (i == MVNETA_MAX_DECODE_WIN)
 992			return -ENOMEM;
 993	} else {
 994		i = pp->bm_win_id;
 995	}
 996
 997	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
 998	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
 999
1000	if (i < 4)
1001		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1002
1003	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1004		    (attr << 8) | target);
1005
1006	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1007
1008	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1009	win_protect |= 3 << (2 * i);
1010	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1011
1012	win_enable &= ~(1 << i);
1013	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1014
1015	return 0;
1016}
1017
1018static  int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1019{
1020	u32 wsize;
1021	u8 target, attr;
1022	int err;
1023
1024	/* Get BM window information */
1025	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1026					 &target, &attr);
1027	if (err < 0)
1028		return err;
1029
1030	pp->bm_win_id = -1;
1031
1032	/* Open NETA -> BM window */
1033	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1034				     target, attr);
1035	if (err < 0) {
1036		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1037		return err;
1038	}
1039	return 0;
1040}
1041
1042/* Assign and initialize pools for port. In case of fail
1043 * buffer manager will remain disabled for current port.
1044 */
1045static int mvneta_bm_port_init(struct platform_device *pdev,
1046			       struct mvneta_port *pp)
1047{
1048	struct device_node *dn = pdev->dev.of_node;
1049	u32 long_pool_id, short_pool_id;
1050
1051	if (!pp->neta_armada3700) {
1052		int ret;
1053
1054		ret = mvneta_bm_port_mbus_init(pp);
1055		if (ret)
1056			return ret;
1057	}
1058
1059	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1060		netdev_info(pp->dev, "missing long pool id\n");
1061		return -EINVAL;
1062	}
1063
1064	/* Create port's long pool depending on mtu */
1065	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1066					   MVNETA_BM_LONG, pp->id,
1067					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1068	if (!pp->pool_long) {
1069		netdev_info(pp->dev, "fail to obtain long pool for port\n");
1070		return -ENOMEM;
1071	}
1072
1073	pp->pool_long->port_map |= 1 << pp->id;
1074
1075	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1076				   pp->pool_long->id);
1077
1078	/* If short pool id is not defined, assume using single pool */
1079	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1080		short_pool_id = long_pool_id;
1081
1082	/* Create port's short pool */
1083	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1084					    MVNETA_BM_SHORT, pp->id,
1085					    MVNETA_BM_SHORT_PKT_SIZE);
1086	if (!pp->pool_short) {
1087		netdev_info(pp->dev, "fail to obtain short pool for port\n");
1088		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1089		return -ENOMEM;
1090	}
1091
1092	if (short_pool_id != long_pool_id) {
1093		pp->pool_short->port_map |= 1 << pp->id;
1094		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1095					   pp->pool_short->id);
1096	}
1097
1098	return 0;
1099}
1100
1101/* Update settings of a pool for bigger packets */
1102static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1103{
1104	struct mvneta_bm_pool *bm_pool = pp->pool_long;
1105	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1106	int num;
1107
1108	/* Release all buffers from long pool */
1109	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1110	if (hwbm_pool->buf_num) {
1111		WARN(1, "cannot free all buffers in pool %d\n",
1112		     bm_pool->id);
1113		goto bm_mtu_err;
1114	}
1115
1116	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1117	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1118	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1119			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1120
1121	/* Fill entire long pool */
1122	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1123	if (num != hwbm_pool->size) {
1124		WARN(1, "pool %d: %d of %d allocated\n",
1125		     bm_pool->id, num, hwbm_pool->size);
1126		goto bm_mtu_err;
1127	}
1128	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1129
1130	return;
1131
1132bm_mtu_err:
1133	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1134	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1135
1136	pp->bm_priv = NULL;
1137	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1138	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1139}
1140
1141/* Start the Ethernet port RX and TX activity */
1142static void mvneta_port_up(struct mvneta_port *pp)
1143{
1144	int queue;
1145	u32 q_map;
1146
1147	/* Enable all initialized TXs. */
1148	q_map = 0;
1149	for (queue = 0; queue < txq_number; queue++) {
1150		struct mvneta_tx_queue *txq = &pp->txqs[queue];
1151		if (txq->descs)
1152			q_map |= (1 << queue);
1153	}
1154	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1155
1156	q_map = 0;
1157	/* Enable all initialized RXQs. */
1158	for (queue = 0; queue < rxq_number; queue++) {
1159		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1160
1161		if (rxq->descs)
1162			q_map |= (1 << queue);
1163	}
1164	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1165}
1166
1167/* Stop the Ethernet port activity */
1168static void mvneta_port_down(struct mvneta_port *pp)
1169{
1170	u32 val;
1171	int count;
1172
1173	/* Stop Rx port activity. Check port Rx activity. */
1174	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1175
1176	/* Issue stop command for active channels only */
1177	if (val != 0)
1178		mvreg_write(pp, MVNETA_RXQ_CMD,
1179			    val << MVNETA_RXQ_DISABLE_SHIFT);
1180
1181	/* Wait for all Rx activity to terminate. */
1182	count = 0;
1183	do {
1184		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1185			netdev_warn(pp->dev,
1186				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1187				    val);
1188			break;
1189		}
1190		mdelay(1);
1191
1192		val = mvreg_read(pp, MVNETA_RXQ_CMD);
1193	} while (val & MVNETA_RXQ_ENABLE_MASK);
1194
1195	/* Stop Tx port activity. Check port Tx activity. Issue stop
1196	 * command for active channels only
1197	 */
1198	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1199
1200	if (val != 0)
1201		mvreg_write(pp, MVNETA_TXQ_CMD,
1202			    (val << MVNETA_TXQ_DISABLE_SHIFT));
1203
1204	/* Wait for all Tx activity to terminate. */
1205	count = 0;
1206	do {
1207		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1208			netdev_warn(pp->dev,
1209				    "TIMEOUT for TX stopped status=0x%08x\n",
1210				    val);
1211			break;
1212		}
1213		mdelay(1);
1214
1215		/* Check TX Command reg that all Txqs are stopped */
1216		val = mvreg_read(pp, MVNETA_TXQ_CMD);
1217
1218	} while (val & MVNETA_TXQ_ENABLE_MASK);
1219
1220	/* Double check to verify that TX FIFO is empty */
1221	count = 0;
1222	do {
1223		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1224			netdev_warn(pp->dev,
1225				    "TX FIFO empty timeout status=0x%08x\n",
1226				    val);
1227			break;
1228		}
1229		mdelay(1);
1230
1231		val = mvreg_read(pp, MVNETA_PORT_STATUS);
1232	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1233		 (val & MVNETA_TX_IN_PRGRS));
1234
1235	udelay(200);
1236}
1237
1238/* Enable the port by setting the port enable bit of the MAC control register */
1239static void mvneta_port_enable(struct mvneta_port *pp)
1240{
1241	u32 val;
1242
1243	/* Enable port */
1244	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1245	val |= MVNETA_GMAC0_PORT_ENABLE;
1246	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1247}
1248
1249/* Disable the port and wait for about 200 usec before returning */
1250static void mvneta_port_disable(struct mvneta_port *pp)
1251{
1252	u32 val;
1253
1254	/* Reset the Enable bit in the Serial Control Register */
1255	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1256	val &= ~MVNETA_GMAC0_PORT_ENABLE;
1257	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1258
1259	udelay(200);
1260}
1261
1262/* Multicast tables methods */
1263
1264/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1265static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1266{
1267	int offset;
1268	u32 val;
1269
1270	if (queue == -1) {
1271		val = 0;
1272	} else {
1273		val = 0x1 | (queue << 1);
1274		val |= (val << 24) | (val << 16) | (val << 8);
1275	}
1276
1277	for (offset = 0; offset <= 0xc; offset += 4)
1278		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1279}
1280
1281/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1282static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1283{
1284	int offset;
1285	u32 val;
1286
1287	if (queue == -1) {
1288		val = 0;
1289	} else {
1290		val = 0x1 | (queue << 1);
1291		val |= (val << 24) | (val << 16) | (val << 8);
1292	}
1293
1294	for (offset = 0; offset <= 0xfc; offset += 4)
1295		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1296
1297}
1298
1299/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1300static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1301{
1302	int offset;
1303	u32 val;
1304
1305	if (queue == -1) {
1306		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1307		val = 0;
1308	} else {
1309		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1310		val = 0x1 | (queue << 1);
1311		val |= (val << 24) | (val << 16) | (val << 8);
1312	}
1313
1314	for (offset = 0; offset <= 0xfc; offset += 4)
1315		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1316}
1317
1318static void mvneta_percpu_unmask_interrupt(void *arg)
1319{
1320	struct mvneta_port *pp = arg;
1321
1322	/* All the queues are unmasked, but actually only the ones
1323	 * mapped to this CPU will be unmasked
1324	 */
1325	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1326		    MVNETA_RX_INTR_MASK_ALL |
1327		    MVNETA_TX_INTR_MASK_ALL |
1328		    MVNETA_MISCINTR_INTR_MASK);
1329}
1330
1331static void mvneta_percpu_mask_interrupt(void *arg)
1332{
1333	struct mvneta_port *pp = arg;
1334
1335	/* All the queues are masked, but actually only the ones
1336	 * mapped to this CPU will be masked
1337	 */
1338	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1339	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1340	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1341}
1342
1343static void mvneta_percpu_clear_intr_cause(void *arg)
1344{
1345	struct mvneta_port *pp = arg;
1346
1347	/* All the queues are cleared, but actually only the ones
1348	 * mapped to this CPU will be cleared
1349	 */
1350	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1351	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1352	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1353}
1354
1355/* This method sets defaults to the NETA port:
1356 *	Clears interrupt Cause and Mask registers.
1357 *	Clears all MAC tables.
1358 *	Sets defaults to all registers.
1359 *	Resets RX and TX descriptor rings.
1360 *	Resets PHY.
1361 * This method can be called after mvneta_port_down() to return the port
1362 *	settings to defaults.
1363 */
1364static void mvneta_defaults_set(struct mvneta_port *pp)
1365{
1366	int cpu;
1367	int queue;
1368	u32 val;
1369	int max_cpu = num_present_cpus();
1370
1371	/* Clear all Cause registers */
1372	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1373
1374	/* Mask all interrupts */
1375	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1376	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1377
1378	/* Enable MBUS Retry bit16 */
1379	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1380
1381	/* Set CPU queue access map. CPUs are assigned to the RX and
1382	 * TX queues modulo their number. If there is only one TX
1383	 * queue then it is assigned to the CPU associated to the
1384	 * default RX queue.
1385	 */
1386	for_each_present_cpu(cpu) {
1387		int rxq_map = 0, txq_map = 0;
1388		int rxq, txq;
1389		if (!pp->neta_armada3700) {
1390			for (rxq = 0; rxq < rxq_number; rxq++)
1391				if ((rxq % max_cpu) == cpu)
1392					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1393
1394			for (txq = 0; txq < txq_number; txq++)
1395				if ((txq % max_cpu) == cpu)
1396					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1397
1398			/* With only one TX queue we configure a special case
1399			 * which allows all the IRQs to be handled on a
1400			 * single CPU
1401			 */
1402			if (txq_number == 1)
1403				txq_map = (cpu == pp->rxq_def) ?
1404					MVNETA_CPU_TXQ_ACCESS(1) : 0;
1405
1406		} else {
1407			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1408			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1409		}
1410
1411		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1412	}
1413
1414	/* Reset RX and TX DMAs */
1415	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1416	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1417
1418	/* Disable Legacy WRR, Disable EJP, Release from reset */
1419	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1420	for (queue = 0; queue < txq_number; queue++) {
1421		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1422		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1423	}
1424
1425	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1426	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1427
1428	/* Set Port Acceleration Mode */
1429	if (pp->bm_priv)
1430		/* HW buffer management + legacy parser */
1431		val = MVNETA_ACC_MODE_EXT2;
1432	else
1433		/* SW buffer management + legacy parser */
1434		val = MVNETA_ACC_MODE_EXT1;
1435	mvreg_write(pp, MVNETA_ACC_MODE, val);
1436
1437	if (pp->bm_priv)
1438		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1439
1440	/* Update val of portCfg register according to all RxQueue types */
1441	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1442	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1443
1444	val = 0;
1445	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1446	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1447
1448	/* Build PORT_SDMA_CONFIG_REG */
1449	val = 0;
1450
1451	/* Default burst size */
1452	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1453	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1454	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1455
1456#if defined(__BIG_ENDIAN)
1457	val |= MVNETA_DESC_SWAP;
1458#endif
1459
1460	/* Assign port SDMA configuration */
1461	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1462
1463	/* Disable PHY polling in hardware, since we're using the
1464	 * kernel phylib to do this.
1465	 */
1466	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1467	val &= ~MVNETA_PHY_POLLING_ENABLE;
1468	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1469
1470	mvneta_set_ucast_table(pp, -1);
1471	mvneta_set_special_mcast_table(pp, -1);
1472	mvneta_set_other_mcast_table(pp, -1);
1473
1474	/* Set port interrupt enable register - default enable all */
1475	mvreg_write(pp, MVNETA_INTR_ENABLE,
1476		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1477		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1478
1479	mvneta_mib_counters_clear(pp);
1480}
1481
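/* Illustrative sketch (not part of the driver): the modulo spreading of
 * queues over CPUs done in mvneta_defaults_set() above, for an assumed
 * system with 2 present CPUs and 8 RX queues.  CPU 0 would own queues
 * 0, 2, 4, 6 and CPU 1 queues 1, 3, 5, 7; the resulting mask is what is
 * written into MVNETA_CPU_MAP() (assuming MVNETA_CPU_RXQ_ACCESS(q) maps
 * queue q to bit q, as the 0xff access-all mask suggests).
 */
static inline u32 mvneta_example_rxq_map(int cpu, int max_cpu, int nr_rxqs)
{
	u32 rxq_map = 0;
	int rxq;

	for (rxq = 0; rxq < nr_rxqs; rxq++)
		if ((rxq % max_cpu) == cpu)
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

	return rxq_map;		/* e.g. 0x55 for cpu=0, max_cpu=2, nr_rxqs=8 */
}
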
1482/* Set max sizes for tx queues */
1483static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1484
1485{
1486	u32 val, size, mtu;
1487	int queue;
1488
1489	mtu = max_tx_size * 8;
1490	if (mtu > MVNETA_TX_MTU_MAX)
1491		mtu = MVNETA_TX_MTU_MAX;
1492
1493	/* Set MTU */
1494	val = mvreg_read(pp, MVNETA_TX_MTU);
1495	val &= ~MVNETA_TX_MTU_MAX;
1496	val |= mtu;
1497	mvreg_write(pp, MVNETA_TX_MTU, val);
1498
1499	/* TX token size and all TXQs token size must be larger than the MTU */
1500	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1501
1502	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1503	if (size < mtu) {
1504		size = mtu;
1505		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1506		val |= size;
1507		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1508	}
1509	for (queue = 0; queue < txq_number; queue++) {
1510		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1511
1512		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1513		if (size < mtu) {
1514			size = mtu;
1515			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1516			val |= size;
1517			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1518		}
1519	}
1520}
1521
1522/* Set unicast address */
1523static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1524				  int queue)
1525{
1526	unsigned int unicast_reg;
1527	unsigned int tbl_offset;
1528	unsigned int reg_offset;
1529
1530	/* Locate the Unicast table entry */
1531	last_nibble = (0xf & last_nibble);
1532
1533	/* offset from unicast tbl base */
1534	tbl_offset = (last_nibble / 4) * 4;
1535
1536	/* offset within the above reg  */
1537	reg_offset = last_nibble % 4;
1538
1539	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1540
1541	if (queue == -1) {
1542		/* Clear accepts frame bit at specified unicast DA tbl entry */
1543		unicast_reg &= ~(0xff << (8 * reg_offset));
1544	} else {
1545		unicast_reg &= ~(0xff << (8 * reg_offset));
1546		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1547	}
1548
1549	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1550}
1551
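/* Illustrative sketch (not part of the driver): the table-indexing
 * arithmetic used by mvneta_set_ucast_addr() above.  The 16 unicast
 * entries (one per value of the MAC's last nibble) are packed four per
 * 32-bit register, so the nibble selects a register offset plus a byte
 * lane inside it.  The nibble value below is only an example.
 */
static inline void mvneta_example_ucast_index(void)
{
	u8 last_nibble = 0xb;				/* example nibble */
	unsigned int tbl_offset = (last_nibble / 4) * 4;	/* register byte offset: 8 */
	unsigned int reg_offset = last_nibble % 4;		/* byte lane in that register: 3 */

	(void)tbl_offset;
	(void)reg_offset;
}
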
1552/* Set mac address */
1553static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1554				int queue)
1555{
1556	unsigned int mac_h;
1557	unsigned int mac_l;
1558
1559	if (queue != -1) {
1560		mac_l = (addr[4] << 8) | (addr[5]);
1561		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1562			(addr[2] << 8) | (addr[3] << 0);
1563
1564		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1565		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1566	}
1567
1568	/* Accept frames of this address */
1569	mvneta_set_ucast_addr(pp, addr[5], queue);
1570}
1571
1572/* Set the number of packets that will be received before RX interrupt
1573 * will be generated by HW.
1574 */
1575static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1576				    struct mvneta_rx_queue *rxq, u32 value)
1577{
1578	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1579		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1580}
1581
1582/* Set the time delay in usec before RX interrupt will be generated by
1583 * HW.
1584 */
1585static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1586				    struct mvneta_rx_queue *rxq, u32 value)
1587{
1588	u32 val;
1589	unsigned long clk_rate;
1590
1591	clk_rate = clk_get_rate(pp->clk);
1592	val = (clk_rate / 1000000) * value;
1593
1594	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1595}
1596
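/* Illustrative sketch (not part of the driver): the usec-to-cycles
 * conversion done by mvneta_rx_time_coal_set() above.  With an assumed
 * core clock of 250 MHz, a 100 usec coalescing delay becomes 25000
 * clock cycles written to MVNETA_RXQ_TIME_COAL_REG().
 */
static inline u32 mvneta_example_time_coal(unsigned long clk_rate_hz, u32 usec)
{
	/* 250000000 / 1000000 * 100 = 25000 cycles for the assumed values */
	return (clk_rate_hz / 1000000) * usec;
}
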
1597/* Set threshold for TX_DONE pkts coalescing */
1598static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1599					 struct mvneta_tx_queue *txq, u32 value)
1600{
1601	u32 val;
1602
1603	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1604
1605	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1606	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1607
1608	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1609}
1610
1611/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1612static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1613				u32 phys_addr, void *virt_addr,
1614				struct mvneta_rx_queue *rxq)
1615{
1616	int i;
1617
1618	rx_desc->buf_phys_addr = phys_addr;
1619	i = rx_desc - rxq->descs;
1620	rxq->buf_virt_addr[i] = virt_addr;
1621}
1622
1623/* Decrement sent descriptors counter */
1624static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1625				     struct mvneta_tx_queue *txq,
1626				     int sent_desc)
1627{
1628	u32 val;
1629
1630	/* Only 255 TX descriptors can be updated at once */
1631	while (sent_desc > 0xff) {
1632		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1633		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1634		sent_desc = sent_desc - 0xff;
1635	}
1636
1637	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1638	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1639}
1640
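/* Illustrative sketch (not part of the driver): the chunked decrement
 * performed by mvneta_txq_sent_desc_dec() above.  The hardware field
 * only holds 8 bits, so e.g. 600 completed descriptors (a made-up
 * number) are reported as 255 + 255 + 90 across three register writes.
 */
static inline int mvneta_example_sent_desc_chunks(int sent_desc)
{
	int writes = 0;

	while (sent_desc > 0xff) {
		sent_desc -= 0xff;	/* one write reporting 255 descriptors */
		writes++;
	}

	return writes + 1;		/* final write reports the remainder */
}
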
1641/* Get number of TX descriptors already sent by HW */
1642static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1643					struct mvneta_tx_queue *txq)
1644{
1645	u32 val;
1646	int sent_desc;
1647
1648	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1649	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1650		MVNETA_TXQ_SENT_DESC_SHIFT;
1651
1652	return sent_desc;
1653}
1654
1655/* Get number of sent descriptors and decrement counter.
1656 *  The number of sent descriptors is returned.
1657 */
1658static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1659				     struct mvneta_tx_queue *txq)
1660{
1661	int sent_desc;
1662
1663	/* Get number of sent descriptors */
1664	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1665
1666	/* Decrement sent descriptors counter */
1667	if (sent_desc)
1668		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1669
1670	return sent_desc;
1671}
1672
1673/* Set TXQ descriptors fields relevant for CSUM calculation */
1674static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1675				int ip_hdr_len, int l4_proto)
1676{
1677	u32 command;
1678
1679	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1680	 * G_L4_chk, L4_type; required only for checksum
1681	 * calculation
1682	 */
1683	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1684	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1685
1686	if (l3_proto == htons(ETH_P_IP))
1687		command |= MVNETA_TXD_IP_CSUM;
1688	else
1689		command |= MVNETA_TX_L3_IP6;
1690
1691	if (l4_proto == IPPROTO_TCP)
1692		command |=  MVNETA_TX_L4_CSUM_FULL;
1693	else if (l4_proto == IPPROTO_UDP)
1694		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1695	else
1696		command |= MVNETA_TX_L4_CSUM_NOT;
1697
1698	return command;
1699}
1700
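/* Illustrative sketch (not part of the driver): the command word that
 * mvneta_txq_desc_csum() builds for a plain TCP-over-IPv4 frame with a
 * standard 14-byte Ethernet header and a 20-byte IP header (ihl = 5
 * 32-bit words).  The offsets are example values; the flag names match
 * the macros used above.
 */
static inline u32 mvneta_example_csum_cmd(void)
{
	int l3_offs = 14;	/* network header starts after the MAC header */
	int ip_hdr_len = 5;	/* IPv4 ihl, in 32-bit words */
	u32 command;

	command  = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
	command |= MVNETA_TXD_IP_CSUM;		/* IPv4: generate the IP checksum */
	command |= MVNETA_TX_L4_CSUM_FULL;	/* TCP: full L4 checksum */

	return command;
}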
1701
1702/* Display more error info */
1703static void mvneta_rx_error(struct mvneta_port *pp,
1704			    struct mvneta_rx_desc *rx_desc)
1705{
1706	u32 status = rx_desc->status;
1707
1708	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1709	case MVNETA_RXD_ERR_CRC:
1710		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1711			   status, rx_desc->data_size);
1712		break;
1713	case MVNETA_RXD_ERR_OVERRUN:
1714		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1715			   status, rx_desc->data_size);
1716		break;
1717	case MVNETA_RXD_ERR_LEN:
1718		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1719			   status, rx_desc->data_size);
1720		break;
1721	case MVNETA_RXD_ERR_RESOURCE:
1722		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1723			   status, rx_desc->data_size);
1724		break;
1725	}
1726}
1727
1728/* Handle RX checksum offload based on the descriptor's status */
1729static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1730			   struct sk_buff *skb)
1731{
1732	if ((pp->dev->features & NETIF_F_RXCSUM) &&
1733	    (status & MVNETA_RXD_L3_IP4) &&
1734	    (status & MVNETA_RXD_L4_CSUM_OK)) {
1735		skb->csum = 0;
1736		skb->ip_summed = CHECKSUM_UNNECESSARY;
1737		return;
1738	}
1739
1740	skb->ip_summed = CHECKSUM_NONE;
1741}
1742
1743/* Return the tx queue pointer (find last set bit) according to <cause>
1744 * returned from the tx_done register. <cause> must not be null. The return
1745 * value is always a valid queue matching the first one found in <cause>.
1746 */
1747static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1748						     u32 cause)
1749{
1750	int queue = fls(cause) - 1;
1751
1752	return &pp->txqs[queue];
1753}
1754
1755/* Free tx queue skbuffs */
1756static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1757				 struct mvneta_tx_queue *txq, int num,
1758				 struct netdev_queue *nq)
1759{
1760	unsigned int bytes_compl = 0, pkts_compl = 0;
1761	int i;
1762
1763	for (i = 0; i < num; i++) {
1764		struct mvneta_tx_desc *tx_desc = txq->descs +
1765			txq->txq_get_index;
1766		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1767
1768		if (skb) {
1769			bytes_compl += skb->len;
1770			pkts_compl++;
1771		}
1772
1773		mvneta_txq_inc_get(txq);
1774
1775		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1776			dma_unmap_single(pp->dev->dev.parent,
1777					 tx_desc->buf_phys_addr,
1778					 tx_desc->data_size, DMA_TO_DEVICE);
1779		if (!skb)
1780			continue;
1781		dev_kfree_skb_any(skb);
1782	}
1783
1784	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1785}
1786
1787/* Handle end of transmission */
1788static void mvneta_txq_done(struct mvneta_port *pp,
1789			   struct mvneta_tx_queue *txq)
1790{
1791	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1792	int tx_done;
1793
1794	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1795	if (!tx_done)
1796		return;
1797
1798	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1799
1800	txq->count -= tx_done;
1801
1802	if (netif_tx_queue_stopped(nq)) {
1803		if (txq->count <= txq->tx_wake_threshold)
1804			netif_tx_wake_queue(nq);
1805	}
1806}
1807
1808/* Refill processing for SW buffer management */
1809/* Allocate page per descriptor */
1810static int mvneta_rx_refill(struct mvneta_port *pp,
1811			    struct mvneta_rx_desc *rx_desc,
1812			    struct mvneta_rx_queue *rxq,
1813			    gfp_t gfp_mask)
1814{
1815	dma_addr_t phys_addr;
1816	struct page *page;
1817
1818	page = __dev_alloc_page(gfp_mask);
1819	if (!page)
1820		return -ENOMEM;
1821
1822	/* map page for use */
1823	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
1824				 DMA_FROM_DEVICE);
1825	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1826		__free_page(page);
1827		return -ENOMEM;
1828	}
1829
1830	phys_addr += pp->rx_offset_correction;
1831	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1832	return 0;
1833}
1834
1835/* Handle tx checksum */
1836static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1837{
1838	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1839		int ip_hdr_len = 0;
1840		__be16 l3_proto = vlan_get_protocol(skb);
1841		u8 l4_proto;
1842
1843		if (l3_proto == htons(ETH_P_IP)) {
1844			struct iphdr *ip4h = ip_hdr(skb);
1845
1846			/* Calculate IPv4 checksum and L4 checksum */
1847			ip_hdr_len = ip4h->ihl;
1848			l4_proto = ip4h->protocol;
1849		} else if (l3_proto == htons(ETH_P_IPV6)) {
1850			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1851
1852			/* Read l4_protocol from one of IPv6 extra headers */
1853			if (skb_network_header_len(skb) > 0)
1854				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1855			l4_proto = ip6h->nexthdr;
1856		} else
1857			return MVNETA_TX_L4_CSUM_NOT;
1858
1859		return mvneta_txq_desc_csum(skb_network_offset(skb),
1860					    l3_proto, ip_hdr_len, l4_proto);
1861	}
1862
1863	return MVNETA_TX_L4_CSUM_NOT;
1864}
1865
1866/* Drop packets received by the RXQ and free buffers */
1867static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1868				 struct mvneta_rx_queue *rxq)
1869{
1870	int rx_done, i;
1871
1872	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1873	if (rx_done)
1874		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1875
1876	if (pp->bm_priv) {
1877		for (i = 0; i < rx_done; i++) {
1878			struct mvneta_rx_desc *rx_desc =
1879						  mvneta_rxq_next_desc_get(rxq);
1880			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1881			struct mvneta_bm_pool *bm_pool;
1882
1883			bm_pool = &pp->bm_priv->bm_pools[pool_id];
1884			/* Return dropped buffer to the pool */
1885			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1886					      rx_desc->buf_phys_addr);
1887		}
1888		return;
1889	}
1890
1891	for (i = 0; i < rxq->size; i++) {
1892		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1893		void *data = rxq->buf_virt_addr[i];
1894		if (!data || !(rx_desc->buf_phys_addr))
1895			continue;
1896
1897		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1898			       PAGE_SIZE, DMA_FROM_DEVICE);
1899		__free_page(data);
1900	}
1901}
1902
1903static inline
1904int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1905{
1906	struct mvneta_rx_desc *rx_desc;
1907	int curr_desc = rxq->first_to_refill;
1908	int i;
1909
1910	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1911		rx_desc = rxq->descs + curr_desc;
1912		if (!(rx_desc->buf_phys_addr)) {
1913			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
1914				pr_err("Can't refill queue %d. Done %d from %d\n",
1915				       rxq->id, i, rxq->refill_num);
1916				rxq->refill_err++;
1917				break;
1918			}
1919		}
1920		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
1921	}
1922	rxq->refill_num -= i;
1923	rxq->first_to_refill = curr_desc;
1924
1925	return i;
1926}
1927
1928/* Main rx processing when using software buffer management */
1929static int mvneta_rx_swbm(struct napi_struct *napi,
1930			  struct mvneta_port *pp, int budget,
1931			  struct mvneta_rx_queue *rxq)
1932{
1933	struct net_device *dev = pp->dev;
1934	int rx_todo, rx_proc;
1935	int refill = 0;
1936	u32 rcvd_pkts = 0;
1937	u32 rcvd_bytes = 0;
1938
1939	/* Get number of received packets */
1940	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
1941	rx_proc = 0;
1942
1943	/* Fairness NAPI loop */
1944	while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
1945		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1946		unsigned char *data;
1947		struct page *page;
1948		dma_addr_t phys_addr;
1949		u32 rx_status, index;
1950		int rx_bytes, skb_size, copy_size;
1951		int frag_num, frag_size, frag_offset;
1952
1953		index = rx_desc - rxq->descs;
1954		page = (struct page *)rxq->buf_virt_addr[index];
1955		data = page_address(page);
1956		/* Prefetch header */
1957		prefetch(data);
1958
1959		phys_addr = rx_desc->buf_phys_addr;
1960		rx_status = rx_desc->status;
1961		rx_proc++;
1962		rxq->refill_num++;
1963
1964		if (rx_status & MVNETA_RXD_FIRST_DESC) {
1965			/* Check errors only for FIRST descriptor */
1966			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
1967				mvneta_rx_error(pp, rx_desc);
1968				dev->stats.rx_errors++;
1969				/* leave the descriptor untouched */
1970				continue;
1971			}
1972			rx_bytes = rx_desc->data_size -
1973				   (ETH_FCS_LEN + MVNETA_MH_SIZE);
1974
1975			/* Allocate small skb for each new packet */
1976			skb_size = max(rx_copybreak, rx_header_size);
1977			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
1978			if (unlikely(!rxq->skb)) {
1979				netdev_err(dev,
1980					   "Can't allocate skb on queue %d\n",
1981					   rxq->id);
1982				dev->stats.rx_dropped++;
1983				rxq->skb_alloc_err++;
1984				continue;
1985			}
1986			copy_size = min(skb_size, rx_bytes);
1987
1988			/* Copy data from buffer to SKB, skip Marvell header */
1989			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
1990			       copy_size);
1991			skb_put(rxq->skb, copy_size);
1992			rxq->left_size = rx_bytes - copy_size;
1993
1994			mvneta_rx_csum(pp, rx_status, rxq->skb);
1995			if (rxq->left_size == 0) {
1996				int size = copy_size + MVNETA_MH_SIZE;
1997
1998				dma_sync_single_range_for_cpu(dev->dev.parent,
1999							      phys_addr, 0,
2000							      size,
2001							      DMA_FROM_DEVICE);
2002
2003				/* leave the descriptor and buffer untouched */
2004			} else {
2005				/* refill descriptor with new buffer later */
2006				rx_desc->buf_phys_addr = 0;
2007
2008				frag_num = 0;
2009				frag_offset = copy_size + MVNETA_MH_SIZE;
2010				frag_size = min(rxq->left_size,
2011						(int)(PAGE_SIZE - frag_offset));
2012				skb_add_rx_frag(rxq->skb, frag_num, page,
2013						frag_offset, frag_size,
2014						PAGE_SIZE);
2015				dma_unmap_page(dev->dev.parent, phys_addr,
2016					       PAGE_SIZE, DMA_FROM_DEVICE);
2017				rxq->left_size -= frag_size;
2018			}
2019		} else {
2020			/* Middle or Last descriptor */
2021			if (unlikely(!rxq->skb)) {
2022				pr_debug("no skb for rx_status 0x%x\n",
2023					 rx_status);
2024				continue;
2025			}
2026			if (!rxq->left_size) {
2027				/* last descriptor has only FCS */
2028				/* and can be discarded */
2029				dma_sync_single_range_for_cpu(dev->dev.parent,
2030							      phys_addr, 0,
2031							      ETH_FCS_LEN,
2032							      DMA_FROM_DEVICE);
2033				/* leave the descriptor and buffer untouched */
2034			} else {
2035				/* refill descriptor with new buffer later */
2036				rx_desc->buf_phys_addr = 0;
2037
2038				frag_num = skb_shinfo(rxq->skb)->nr_frags;
2039				frag_offset = 0;
2040				frag_size = min(rxq->left_size,
2041						(int)(PAGE_SIZE - frag_offset));
2042				skb_add_rx_frag(rxq->skb, frag_num, page,
2043						frag_offset, frag_size,
2044						PAGE_SIZE);
2045
2046				dma_unmap_page(dev->dev.parent, phys_addr,
2047					       PAGE_SIZE, DMA_FROM_DEVICE);
2048
2049				rxq->left_size -= frag_size;
2050			}
2051		} /* Middle or Last descriptor */
2052
2053		if (!(rx_status & MVNETA_RXD_LAST_DESC))
2054			/* no last descriptor this time */
2055			continue;
2056
2057		if (rxq->left_size) {
2058			pr_err("get last desc, but left_size (%d) != 0\n",
2059			       rxq->left_size);
2060			dev_kfree_skb_any(rxq->skb);
2061			rxq->left_size = 0;
2062			rxq->skb = NULL;
2063			continue;
2064		}
2065		rcvd_pkts++;
2066		rcvd_bytes += rxq->skb->len;
2067
2068		/* Linux processing */
2069		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
2070
2071		napi_gro_receive(napi, rxq->skb);
2072
2073		/* clear the incomplete skb pointer in the queue */
2074		rxq->skb = NULL;
2075		rxq->left_size = 0;
2076	}
2077
2078	if (rcvd_pkts) {
2079		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2080
2081		u64_stats_update_begin(&stats->syncp);
2082		stats->rx_packets += rcvd_pkts;
2083		stats->rx_bytes   += rcvd_bytes;
2084		u64_stats_update_end(&stats->syncp);
2085	}
2086
2087	/* return some buffers to hardware queue, one at a time is too slow */
2088	refill = mvneta_rx_refill_queue(pp, rxq);
2089
2090	/* Update rxq management counters */
2091	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2092
2093	return rcvd_pkts;
2094}
2095
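/* Illustrative sketch (not part of the driver): how mvneta_rx_swbm()
 * above splits one received frame between the small skb and a page
 * fragment.  Assuming the defaults rx_copybreak = 256 and
 * rx_header_size = 128, a 1400-byte frame has its first 256 bytes
 * copied into the skb while the remaining 1144 bytes stay in the DMA
 * page and are attached with skb_add_rx_frag().
 */
static inline void mvneta_example_copybreak_split(void)
{
	int rx_bytes = 1400;				/* payload after MH/FCS removal */
	int skb_size = 256;				/* max(rx_copybreak, rx_header_size) */
	int copy_size = min(skb_size, rx_bytes);	/* 256 bytes copied */
	int left_size = rx_bytes - copy_size;		/* 1144 bytes left in the page */

	(void)left_size;
}
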
2096/* Main rx processing when using hardware buffer management */
2097static int mvneta_rx_hwbm(struct napi_struct *napi,
2098			  struct mvneta_port *pp, int rx_todo,
2099			  struct mvneta_rx_queue *rxq)
2100{
2101	struct net_device *dev = pp->dev;
2102	int rx_done;
2103	u32 rcvd_pkts = 0;
2104	u32 rcvd_bytes = 0;
2105
2106	/* Get number of received packets */
2107	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2108
2109	if (rx_todo > rx_done)
2110		rx_todo = rx_done;
2111
2112	rx_done = 0;
2113
2114	/* Fairness NAPI loop */
2115	while (rx_done < rx_todo) {
2116		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2117		struct mvneta_bm_pool *bm_pool = NULL;
2118		struct sk_buff *skb;
2119		unsigned char *data;
2120		dma_addr_t phys_addr;
2121		u32 rx_status, frag_size;
2122		int rx_bytes, err;
2123		u8 pool_id;
2124
2125		rx_done++;
2126		rx_status = rx_desc->status;
2127		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2128		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2129		phys_addr = rx_desc->buf_phys_addr;
2130		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2131		bm_pool = &pp->bm_priv->bm_pools[pool_id];
2132
2133		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2134		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2135err_drop_frame_ret_pool:
2136			/* Return the buffer to the pool */
2137			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2138					      rx_desc->buf_phys_addr);
2139err_drop_frame:
2140			dev->stats.rx_errors++;
2141			mvneta_rx_error(pp, rx_desc);
2142			/* leave the descriptor untouched */
2143			continue;
2144		}
2145
2146		if (rx_bytes <= rx_copybreak) {
2147			/* better copy a small frame and not unmap the DMA region */
2148			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2149			if (unlikely(!skb))
2150				goto err_drop_frame_ret_pool;
2151
2152			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2153			                              rx_desc->buf_phys_addr,
2154			                              MVNETA_MH_SIZE + NET_SKB_PAD,
2155			                              rx_bytes,
2156			                              DMA_FROM_DEVICE);
2157			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2158				     rx_bytes);
2159
2160			skb->protocol = eth_type_trans(skb, dev);
2161			mvneta_rx_csum(pp, rx_status, skb);
2162			napi_gro_receive(napi, skb);
2163
2164			rcvd_pkts++;
2165			rcvd_bytes += rx_bytes;
2166
2167			/* Return the buffer to the pool */
2168			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2169					      rx_desc->buf_phys_addr);
2170
2171			/* leave the descriptor and buffer untouched */
2172			continue;
2173		}
2174
2175		/* Refill processing */
2176		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2177		if (err) {
2178			netdev_err(dev, "Linux processing - Can't refill\n");
2179			rxq->refill_err++;
2180			goto err_drop_frame_ret_pool;
2181		}
2182
2183		frag_size = bm_pool->hwbm_pool.frag_size;
2184
2185		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2186
2187		/* After refill old buffer has to be unmapped regardless
2188		 * the skb is successfully built or not.
2189		 */
2190		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2191				 bm_pool->buf_size, DMA_FROM_DEVICE);
2192		if (!skb)
2193			goto err_drop_frame;
2194
2195		rcvd_pkts++;
2196		rcvd_bytes += rx_bytes;
2197
2198		/* Linux processing */
2199		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2200		skb_put(skb, rx_bytes);
2201
2202		skb->protocol = eth_type_trans(skb, dev);
2203
2204		mvneta_rx_csum(pp, rx_status, skb);
2205
2206		napi_gro_receive(napi, skb);
2207	}
2208
2209	if (rcvd_pkts) {
2210		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2211
2212		u64_stats_update_begin(&stats->syncp);
2213		stats->rx_packets += rcvd_pkts;
2214		stats->rx_bytes   += rcvd_bytes;
2215		u64_stats_update_end(&stats->syncp);
2216	}
2217
2218	/* Update rxq management counters */
2219	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2220
2221	return rx_done;
2222}
2223
2224static inline void
2225mvneta_tso_put_hdr(struct sk_buff *skb,
2226		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2227{
2228	struct mvneta_tx_desc *tx_desc;
2229	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2230
2231	txq->tx_skb[txq->txq_put_index] = NULL;
2232	tx_desc = mvneta_txq_next_desc_get(txq);
2233	tx_desc->data_size = hdr_len;
2234	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2235	tx_desc->command |= MVNETA_TXD_F_DESC;
2236	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2237				 txq->txq_put_index * TSO_HEADER_SIZE;
2238	mvneta_txq_inc_put(txq);
2239}
2240
2241static inline int
2242mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2243		    struct sk_buff *skb, char *data, int size,
2244		    bool last_tcp, bool is_last)
2245{
2246	struct mvneta_tx_desc *tx_desc;
2247
2248	tx_desc = mvneta_txq_next_desc_get(txq);
2249	tx_desc->data_size = size;
2250	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2251						size, DMA_TO_DEVICE);
2252	if (unlikely(dma_mapping_error(dev->dev.parent,
2253		     tx_desc->buf_phys_addr))) {
2254		mvneta_txq_desc_put(txq);
2255		return -ENOMEM;
2256	}
2257
2258	tx_desc->command = 0;
2259	txq->tx_skb[txq->txq_put_index] = NULL;
2260
2261	if (last_tcp) {
2262		/* last descriptor in the TCP packet */
2263		tx_desc->command = MVNETA_TXD_L_DESC;
2264
2265		/* last descriptor in SKB */
2266		if (is_last)
2267			txq->tx_skb[txq->txq_put_index] = skb;
2268	}
2269	mvneta_txq_inc_put(txq);
2270	return 0;
2271}
2272
2273static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2274			 struct mvneta_tx_queue *txq)
2275{
2276	int total_len, data_left;
2277	int desc_count = 0;
2278	struct mvneta_port *pp = netdev_priv(dev);
2279	struct tso_t tso;
2280	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2281	int i;
2282
2283	/* Count needed descriptors */
2284	if ((txq->count + tso_count_descs(skb)) >= txq->size)
2285		return 0;
2286
2287	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2288		pr_info("*** Is this even  possible???!?!?\n");
2289		return 0;
2290	}
2291
2292	/* Initialize the TSO handler, and prepare the first payload */
2293	tso_start(skb, &tso);
2294
2295	total_len = skb->len - hdr_len;
2296	while (total_len > 0) {
2297		char *hdr;
2298
2299		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2300		total_len -= data_left;
2301		desc_count++;
2302
2303		/* prepare packet headers: MAC + IP + TCP */
2304		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2305		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2306
2307		mvneta_tso_put_hdr(skb, pp, txq);
2308
2309		while (data_left > 0) {
2310			int size;
2311			desc_count++;
2312
2313			size = min_t(int, tso.size, data_left);
2314
2315			if (mvneta_tso_put_data(dev, txq, skb,
2316						 tso.data, size,
2317						 size == data_left,
2318						 total_len == 0))
2319				goto err_release;
2320			data_left -= size;
2321
2322			tso_build_data(skb, &tso, size);
2323		}
2324	}
2325
2326	return desc_count;
2327
2328err_release:
2329	/* Release all used data descriptors; header descriptors must not
2330	 * be DMA-unmapped.
2331	 */
2332	for (i = desc_count - 1; i >= 0; i--) {
2333		struct mvneta_tx_desc *tx_desc = txq->descs + i;
2334		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2335			dma_unmap_single(pp->dev->dev.parent,
2336					 tx_desc->buf_phys_addr,
2337					 tx_desc->data_size,
2338					 DMA_TO_DEVICE);
2339		mvneta_txq_desc_put(txq);
2340	}
2341	return 0;
2342}
2343
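/* Illustrative sketch (not part of the driver): how mvneta_tx_tso()
 * above consumes descriptors.  For an assumed TSO skb with a 4380-byte
 * TCP payload and gso_size = 1460, the payload is cut into three
 * 1460-byte segments; each segment takes one header descriptor (from
 * the tso_hdrs area) plus at least one data descriptor, so at least
 * six descriptors are needed.  All numbers here are hypothetical.
 */
static inline int mvneta_example_tso_desc_count(void)
{
	int payload = 4380;				/* TCP payload bytes, hypothetical */
	int gso_size = 1460;				/* MSS */
	int segs = DIV_ROUND_UP(payload, gso_size);	/* 3 segments */

	/* one header descriptor + one data descriptor per segment, assuming
	 * each segment's payload is contiguous in the linear area
	 */
	return segs * 2;				/* 6 descriptors */
}
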
2344/* Handle tx fragmentation processing */
2345static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2346				  struct mvneta_tx_queue *txq)
2347{
2348	struct mvneta_tx_desc *tx_desc;
2349	int i, nr_frags = skb_shinfo(skb)->nr_frags;
2350
2351	for (i = 0; i < nr_frags; i++) {
2352		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2353		void *addr = skb_frag_address(frag);
2354
2355		tx_desc = mvneta_txq_next_desc_get(txq);
2356		tx_desc->data_size = skb_frag_size(frag);
2357
2358		tx_desc->buf_phys_addr =
2359			dma_map_single(pp->dev->dev.parent, addr,
2360				       tx_desc->data_size, DMA_TO_DEVICE);
2361
2362		if (dma_mapping_error(pp->dev->dev.parent,
2363				      tx_desc->buf_phys_addr)) {
2364			mvneta_txq_desc_put(txq);
2365			goto error;
2366		}
2367
2368		if (i == nr_frags - 1) {
2369			/* Last descriptor */
2370			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2371			txq->tx_skb[txq->txq_put_index] = skb;
2372		} else {
2373			/* Descriptor in the middle: Not First, Not Last */
2374			tx_desc->command = 0;
2375			txq->tx_skb[txq->txq_put_index] = NULL;
2376		}
2377		mvneta_txq_inc_put(txq);
2378	}
2379
2380	return 0;
2381
2382error:
2383	/* Release all descriptors that were used to map fragments of
2384	 * this packet, as well as the corresponding DMA mappings
2385	 */
2386	for (i = i - 1; i >= 0; i--) {
2387		tx_desc = txq->descs + i;
2388		dma_unmap_single(pp->dev->dev.parent,
2389				 tx_desc->buf_phys_addr,
2390				 tx_desc->data_size,
2391				 DMA_TO_DEVICE);
2392		mvneta_txq_desc_put(txq);
2393	}
2394
2395	return -ENOMEM;
2396}
2397
2398/* Main tx processing */
2399static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2400{
2401	struct mvneta_port *pp = netdev_priv(dev);
2402	u16 txq_id = skb_get_queue_mapping(skb);
2403	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2404	struct mvneta_tx_desc *tx_desc;
2405	int len = skb->len;
2406	int frags = 0;
2407	u32 tx_cmd;
2408
2409	if (!netif_running(dev))
2410		goto out;
2411
2412	if (skb_is_gso(skb)) {
2413		frags = mvneta_tx_tso(skb, dev, txq);
2414		goto out;
2415	}
2416
2417	frags = skb_shinfo(skb)->nr_frags + 1;
2418
2419	/* Get a descriptor for the first part of the packet */
2420	tx_desc = mvneta_txq_next_desc_get(txq);
2421
2422	tx_cmd = mvneta_skb_tx_csum(pp, skb);
2423
2424	tx_desc->data_size = skb_headlen(skb);
2425
2426	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2427						tx_desc->data_size,
2428						DMA_TO_DEVICE);
2429	if (unlikely(dma_mapping_error(dev->dev.parent,
2430				       tx_desc->buf_phys_addr))) {
2431		mvneta_txq_desc_put(txq);
2432		frags = 0;
2433		goto out;
2434	}
2435
2436	if (frags == 1) {
2437		/* First and Last descriptor */
2438		tx_cmd |= MVNETA_TXD_FLZ_DESC;
2439		tx_desc->command = tx_cmd;
2440		txq->tx_skb[txq->txq_put_index] = skb;
2441		mvneta_txq_inc_put(txq);
2442	} else {
2443		/* First but not Last */
2444		tx_cmd |= MVNETA_TXD_F_DESC;
2445		txq->tx_skb[txq->txq_put_index] = NULL;
2446		mvneta_txq_inc_put(txq);
2447		tx_desc->command = tx_cmd;
2448		/* Continue with other skb fragments */
2449		if (mvneta_tx_frag_process(pp, skb, txq)) {
2450			dma_unmap_single(dev->dev.parent,
2451					 tx_desc->buf_phys_addr,
2452					 tx_desc->data_size,
2453					 DMA_TO_DEVICE);
2454			mvneta_txq_desc_put(txq);
2455			frags = 0;
2456			goto out;
2457		}
2458	}
2459
2460out:
2461	if (frags > 0) {
2462		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2463		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2464
2465		netdev_tx_sent_queue(nq, len);
2466
2467		txq->count += frags;
2468		if (txq->count >= txq->tx_stop_threshold)
2469			netif_tx_stop_queue(nq);
2470
2471		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2472		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2473			mvneta_txq_pend_desc_add(pp, txq, frags);
2474		else
2475			txq->pending += frags;
2476
2477		u64_stats_update_begin(&stats->syncp);
2478		stats->tx_packets++;
2479		stats->tx_bytes  += len;
2480		u64_stats_update_end(&stats->syncp);
2481	} else {
2482		dev->stats.tx_dropped++;
2483		dev_kfree_skb_any(skb);
2484	}
2485
2486	return NETDEV_TX_OK;
2487}
2488
2489
2490/* Free tx resources, when resetting a port */
2491static void mvneta_txq_done_force(struct mvneta_port *pp,
2492				  struct mvneta_tx_queue *txq)
2493
2494{
2495	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2496	int tx_done = txq->count;
2497
2498	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2499
2500	/* reset txq */
2501	txq->count = 0;
2502	txq->txq_put_index = 0;
2503	txq->txq_get_index = 0;
2504}
2505
2506/* Handle tx done - called in softirq context. The <cause_tx_done> argument
2507 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2508 */
2509static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2510{
2511	struct mvneta_tx_queue *txq;
2512	struct netdev_queue *nq;
2513	int cpu = smp_processor_id();
2514
2515	while (cause_tx_done) {
2516		txq = mvneta_tx_done_policy(pp, cause_tx_done);
2517
2518		nq = netdev_get_tx_queue(pp->dev, txq->id);
2519		__netif_tx_lock(nq, cpu);
2520
2521		if (txq->count)
2522			mvneta_txq_done(pp, txq);
2523
2524		__netif_tx_unlock(nq);
2525		cause_tx_done &= ~((1 << txq->id));
2526	}
2527}
2528
2529/* Compute the CRC-8 of the specified address, using an algorithm that is
2530 * specific to the hw spec and differs from the generic CRC-8 algorithm
2531 */
2532static int mvneta_addr_crc(unsigned char *addr)
2533{
2534	int crc = 0;
2535	int i;
2536
2537	for (i = 0; i < ETH_ALEN; i++) {
2538		int j;
2539
2540		crc = (crc ^ addr[i]) << 8;
2541		for (j = 7; j >= 0; j--) {
2542			if (crc & (0x100 << j))
2543				crc ^= 0x107 << j;
2544		}
2545	}
2546
2547	return crc;
2548}
2549
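/* Illustrative sketch (not part of the driver): the same CRC-8
 * computation as mvneta_addr_crc() above, written as a standalone
 * helper so it can be checked in isolation.  The polynomial is
 * x^8 + x^2 + x + 1 (0x107 including the implicit x^8 term), applied
 * MSB-first to the six address bytes.
 */
static inline int mvneta_example_addr_crc(const unsigned char addr[6])
{
	int crc = 0;
	int i, j;

	for (i = 0; i < 6; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--)
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
	}

	return crc;	/* 8-bit result, used to index the Other Multicast table */
}
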
2550/* This method controls the net device special MAC multicast support.
2551 * The Special Multicast Table for MAC addresses supports MAC of the form
2552 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2553 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2554 * Table entries in the DA-Filter table. This method sets the appropriate
2555 * Special Multicast Table entry.
2556 */
2557static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2558					  unsigned char last_byte,
2559					  int queue)
2560{
2561	unsigned int smc_table_reg;
2562	unsigned int tbl_offset;
2563	unsigned int reg_offset;
2564
2565	/* Register offset from SMC table base    */
2566	tbl_offset = (last_byte / 4);
2567	/* Entry offset within the above reg */
2568	reg_offset = last_byte % 4;
2569
2570	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2571					+ tbl_offset * 4));
2572
2573	if (queue == -1)
2574		smc_table_reg &= ~(0xff << (8 * reg_offset));
2575	else {
2576		smc_table_reg &= ~(0xff << (8 * reg_offset));
2577		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2578	}
2579
2580	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2581		    smc_table_reg);
2582}
2583
2584/* This method controls the network device Other MAC multicast support.
2585 * The Other Multicast Table is used for multicast of another type.
2586 * A CRC-8 is used as an index to the Other Multicast Table entries
2587 * in the DA-Filter table.
2588 * The method gets the CRC-8 value from the calling routine and
2589 * sets the appropriate Other Multicast Table entry according to the
2590 * specified CRC-8.
2591 */
2592static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2593					unsigned char crc8,
2594					int queue)
2595{
2596	unsigned int omc_table_reg;
2597	unsigned int tbl_offset;
2598	unsigned int reg_offset;
2599
2600	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2601	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
2602
2603	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2604
2605	if (queue == -1) {
2606		/* Clear accepts frame bit at specified Other DA table entry */
2607		omc_table_reg &= ~(0xff << (8 * reg_offset));
2608	} else {
2609		omc_table_reg &= ~(0xff << (8 * reg_offset));
2610		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2611	}
2612
2613	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2614}
2615
2616/* The network device supports multicast using two tables:
2617 *    1) Special Multicast Table for MAC addresses of the form
2618 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2619 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2620 *       Table entries in the DA-Filter table.
2621 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
2622 *       is used as an index to the Other Multicast Table entries in the
2623 *       DA-Filter table.
2624 */
2625static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2626				 int queue)
2627{
2628	unsigned char crc_result = 0;
2629
2630	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2631		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2632		return 0;
2633	}
2634
2635	crc_result = mvneta_addr_crc(p_addr);
2636	if (queue == -1) {
2637		if (pp->mcast_count[crc_result] == 0) {
2638			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2639				    crc_result);
2640			return -EINVAL;
2641		}
2642
2643		pp->mcast_count[crc_result]--;
2644		if (pp->mcast_count[crc_result] != 0) {
2645			netdev_info(pp->dev,
2646				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
2647				    pp->mcast_count[crc_result], crc_result);
2648			return -EINVAL;
2649		}
2650	} else
2651		pp->mcast_count[crc_result]++;
2652
2653	mvneta_set_other_mcast_addr(pp, crc_result, queue);
2654
2655	return 0;
2656}
2657
2658/* Configure the filtering mode of the Ethernet port */
2659static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2660					  int is_promisc)
2661{
2662	u32 port_cfg_reg, val;
2663
2664	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2665
2666	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2667
2668	/* Set / Clear UPM bit in port configuration register */
2669	if (is_promisc) {
2670		/* Accept all Unicast addresses */
2671		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2672		val |= MVNETA_FORCE_UNI;
2673		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2674		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2675	} else {
2676		/* Reject all Unicast addresses */
2677		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2678		val &= ~MVNETA_FORCE_UNI;
2679	}
2680
2681	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2682	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2683}
2684
2685/* register unicast and multicast addresses */
2686static void mvneta_set_rx_mode(struct net_device *dev)
2687{
2688	struct mvneta_port *pp = netdev_priv(dev);
2689	struct netdev_hw_addr *ha;
2690
2691	if (dev->flags & IFF_PROMISC) {
2692		/* Accept all: Multicast + Unicast */
2693		mvneta_rx_unicast_promisc_set(pp, 1);
2694		mvneta_set_ucast_table(pp, pp->rxq_def);
2695		mvneta_set_special_mcast_table(pp, pp->rxq_def);
2696		mvneta_set_other_mcast_table(pp, pp->rxq_def);
2697	} else {
2698		/* Accept single Unicast */
2699		mvneta_rx_unicast_promisc_set(pp, 0);
2700		mvneta_set_ucast_table(pp, -1);
2701		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2702
2703		if (dev->flags & IFF_ALLMULTI) {
2704			/* Accept all multicast */
2705			mvneta_set_special_mcast_table(pp, pp->rxq_def);
2706			mvneta_set_other_mcast_table(pp, pp->rxq_def);
2707		} else {
2708			/* Accept only initialized multicast */
2709			mvneta_set_special_mcast_table(pp, -1);
2710			mvneta_set_other_mcast_table(pp, -1);
2711
2712			if (!netdev_mc_empty(dev)) {
2713				netdev_for_each_mc_addr(ha, dev) {
2714					mvneta_mcast_addr_set(pp, ha->addr,
2715							      pp->rxq_def);
2716				}
2717			}
2718		}
2719	}
2720}
2721
2722/* Interrupt handling - the callback for request_irq() */
2723static irqreturn_t mvneta_isr(int irq, void *dev_id)
2724{
2725	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2726
2727	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2728	napi_schedule(&pp->napi);
2729
2730	return IRQ_HANDLED;
2731}
2732
2733/* Interrupt handling - the callback for request_percpu_irq() */
2734static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2735{
2736	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2737
2738	disable_percpu_irq(port->pp->dev->irq);
2739	napi_schedule(&port->napi);
2740
2741	return IRQ_HANDLED;
2742}
2743
2744static void mvneta_link_change(struct mvneta_port *pp)
2745{
2746	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2747
2748	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
2749}
2750
2751/* NAPI handler
2752 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2753 * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
2754 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2755 * received on the corresponding RXQ (bit 8 is for RX queue 0).
2756 * Each CPU has its own causeRxTx register.
2757 */
2758static int mvneta_poll(struct napi_struct *napi, int budget)
2759{
2760	int rx_done = 0;
2761	u32 cause_rx_tx;
2762	int rx_queue;
2763	struct mvneta_port *pp = netdev_priv(napi->dev);
2764	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2765
2766	if (!netif_running(pp->dev)) {
2767		napi_complete(napi);
2768		return rx_done;
2769	}
2770
2771	/* Read cause register */
2772	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2773	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2774		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2775
2776		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2777
2778		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2779				  MVNETA_CAUSE_LINK_CHANGE))
2780			mvneta_link_change(pp);
2781	}
2782
2783	/* Release Tx descriptors */
2784	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2785		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2786		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2787	}
2788
2789	/* For the case where the last mvneta_poll did not process all
2790	 * RX packets
2791	 */
2792	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2793
2794	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2795		port->cause_rx_tx;
2796
2797	if (rx_queue) {
2798		rx_queue = rx_queue - 1;
2799		if (pp->bm_priv)
2800			rx_done = mvneta_rx_hwbm(napi, pp, budget,
2801						 &pp->rxqs[rx_queue]);
2802		else
2803			rx_done = mvneta_rx_swbm(napi, pp, budget,
2804						 &pp->rxqs[rx_queue]);
2805	}
2806
2807	if (rx_done < budget) {
2808		cause_rx_tx = 0;
2809		napi_complete_done(napi, rx_done);
2810
2811		if (pp->neta_armada3700) {
2812			unsigned long flags;
2813
2814			local_irq_save(flags);
2815			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2816				    MVNETA_RX_INTR_MASK(rxq_number) |
2817				    MVNETA_TX_INTR_MASK(txq_number) |
2818				    MVNETA_MISCINTR_INTR_MASK);
2819			local_irq_restore(flags);
2820		} else {
2821			enable_percpu_irq(pp->dev->irq, 0);
2822		}
2823	}
2824
2825	if (pp->neta_armada3700)
2826		pp->cause_rx_tx = cause_rx_tx;
2827	else
2828		port->cause_rx_tx = cause_rx_tx;
2829
2830	return rx_done;
2831}
2832
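/* Illustrative sketch (not part of the driver): how mvneta_poll() above
 * picks the RX queue to service from the cause register.  RX bits live
 * in bits 8..15, so the cause is shifted down and fls() returns the
 * highest pending queue number plus one.  The cause value is made up.
 */
static inline int mvneta_example_rx_queue_from_cause(void)
{
	u32 cause_rx_tx = BIT(10) | BIT(8);		/* RX queues 2 and 0 pending */
	int rx_queue = fls((cause_rx_tx >> 8) & 0xff);	/* fls(0x5) = 3 */

	return rx_queue ? rx_queue - 1 : -1;		/* queue 2 is served first */
}
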
2833/* Handle rxq fill: allocates rxq buffers; called when initializing a port */
2834static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2835			   int num)
2836{
2837	int i;
2838
2839	for (i = 0; i < num; i++) {
2840		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2841		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
2842				     GFP_KERNEL) != 0) {
2843			netdev_err(pp->dev,
2844				   "%s:rxq %d, %d of %d buffs  filled\n",
2845				   __func__, rxq->id, i, num);
2846			break;
2847		}
2848	}
2849
2850	/* Add this number of RX descriptors as non occupied (ready to
2851	 * get packets)
2852	 */
2853	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2854
2855	return i;
2856}
2857
2858/* Free all packets pending transmit from all TXQs and reset TX port */
2859static void mvneta_tx_reset(struct mvneta_port *pp)
2860{
2861	int queue;
2862
2863	/* free the skb's in the tx ring */
2864	for (queue = 0; queue < txq_number; queue++)
2865		mvneta_txq_done_force(pp, &pp->txqs[queue]);
2866
2867	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2868	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2869}
2870
2871static void mvneta_rx_reset(struct mvneta_port *pp)
2872{
2873	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2874	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2875}
2876
2877/* Rx/Tx queue initialization/cleanup methods */
2878
2879static int mvneta_rxq_sw_init(struct mvneta_port *pp,
2880			      struct mvneta_rx_queue *rxq)
2881{
2882	rxq->size = pp->rx_ring_size;
2883
2884	/* Allocate memory for RX descriptors */
2885	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2886					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2887					&rxq->descs_phys, GFP_KERNEL);
2888	if (!rxq->descs)
2889		return -ENOMEM;
2890
2891	rxq->last_desc = rxq->size - 1;
2892
2893	return 0;
2894}
2895
2896static void mvneta_rxq_hw_init(struct mvneta_port *pp,
2897			       struct mvneta_rx_queue *rxq)
2898{
2899	/* Set Rx descriptors queue starting address */
2900	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2901	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2902
2903	/* Set coalescing pkts and time */
2904	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2905	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2906
2907	if (!pp->bm_priv) {
2908		/* Set Offset */
2909		mvneta_rxq_offset_set(pp, rxq, 0);
2910		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
2911					PAGE_SIZE :
2912					MVNETA_RX_BUF_SIZE(pp->pkt_size));
2913		mvneta_rxq_bm_disable(pp, rxq);
2914		mvneta_rxq_fill(pp, rxq, rxq->size);
2915	} else {
2916		/* Set Offset */
2917		mvneta_rxq_offset_set(pp, rxq,
2918				      NET_SKB_PAD - pp->rx_offset_correction);
2919
2920		mvneta_rxq_bm_enable(pp, rxq);
2921		/* Fill RXQ with buffers from RX pool */
2922		mvneta_rxq_long_pool_set(pp, rxq);
2923		mvneta_rxq_short_pool_set(pp, rxq);
2924		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2925	}
2926}
2927
2928/* Create a specified RX queue */
2929static int mvneta_rxq_init(struct mvneta_port *pp,
2930			   struct mvneta_rx_queue *rxq)
2931
2932{
2933	int ret;
2934
2935	ret = mvneta_rxq_sw_init(pp, rxq);
2936	if (ret < 0)
2937		return ret;
2938
2939	mvneta_rxq_hw_init(pp, rxq);
2940
2941	return 0;
2942}
2943
2944/* Cleanup Rx queue */
2945static void mvneta_rxq_deinit(struct mvneta_port *pp,
2946			      struct mvneta_rx_queue *rxq)
2947{
2948	mvneta_rxq_drop_pkts(pp, rxq);
2949
2950	if (rxq->skb)
2951		dev_kfree_skb_any(rxq->skb);
2952
2953	if (rxq->descs)
2954		dma_free_coherent(pp->dev->dev.parent,
2955				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2956				  rxq->descs,
2957				  rxq->descs_phys);
2958
2959	rxq->descs             = NULL;
2960	rxq->last_desc         = 0;
2961	rxq->next_desc_to_proc = 0;
2962	rxq->descs_phys        = 0;
2963	rxq->first_to_refill   = 0;
2964	rxq->refill_num        = 0;
2965	rxq->skb               = NULL;
2966	rxq->left_size         = 0;
2967}
2968
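/* Software-side TX queue setup: allocate the TX descriptor ring, the
 * per-descriptor skb tracking array and the TSO header buffers, compute
 * the stop/wake thresholds and set the XPS CPU affinity for the queue.
 */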
2969static int mvneta_txq_sw_init(struct mvneta_port *pp,
2970			      struct mvneta_tx_queue *txq)
2971{
2972	int cpu;
2973
2974	txq->size = pp->tx_ring_size;
2975
2976	/* A queue must always have room for at least one skb.
2977	 * Therefore, stop the queue when the number of free entries
2978	 * reaches the maximum number of descriptors per skb.
2979	 */
2980	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2981	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2982
2983	/* Allocate memory for TX descriptors */
2984	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2985					txq->size * MVNETA_DESC_ALIGNED_SIZE,
2986					&txq->descs_phys, GFP_KERNEL);
2987	if (!txq->descs)
2988		return -ENOMEM;
2989
2990	txq->last_desc = txq->size - 1;
2991
2992	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
2993				    GFP_KERNEL);
2994	if (!txq->tx_skb) {
2995		dma_free_coherent(pp->dev->dev.parent,
2996				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2997				  txq->descs, txq->descs_phys);
2998		return -ENOMEM;
2999	}
3000
3001	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3002	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3003					   txq->size * TSO_HEADER_SIZE,
3004					   &txq->tso_hdrs_phys, GFP_KERNEL);
3005	if (!txq->tso_hdrs) {
3006		kfree(txq->tx_skb);
3007		dma_free_coherent(pp->dev->dev.parent,
3008				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
3009				  txq->descs, txq->descs_phys);
3010		return -ENOMEM;
3011	}
3012
3013	/* Setup XPS mapping */
3014	if (txq_number > 1)
3015		cpu = txq->id % num_present_cpus();
3016	else
3017		cpu = pp->rxq_def % num_present_cpus();
3018	cpumask_set_cpu(cpu, &txq->affinity_mask);
3019	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3020
3021	return 0;
3022}
3023
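/* Hardware-side TX queue setup: grant the queue its full token-bucket
 * bandwidth, program the descriptor ring address and size and set the
 * TX-done coalescing threshold.
 */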
3024static void mvneta_txq_hw_init(struct mvneta_port *pp,
3025			       struct mvneta_tx_queue *txq)
3026{
3027	/* Set maximum bandwidth for enabled TXQs */
3028	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3029	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3030
3031	/* Set Tx descriptors queue starting address */
3032	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3033	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3034
3035	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3036}
3037
3038/* Create and initialize a tx queue */
3039static int mvneta_txq_init(struct mvneta_port *pp,
3040			   struct mvneta_tx_queue *txq)
3041{
3042	int ret;
3043
3044	ret = mvneta_txq_sw_init(pp, txq);
3045	if (ret < 0)
3046		return ret;
3047
3048	mvneta_txq_hw_init(pp, txq);
3049
3050	return 0;
3051}
3052
3053/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3054static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3055				 struct mvneta_tx_queue *txq)
3056{
3057	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3058
3059	kfree(txq->tx_skb);
3060
3061	if (txq->tso_hdrs)
3062		dma_free_coherent(pp->dev->dev.parent,
3063				  txq->size * TSO_HEADER_SIZE,
3064				  txq->tso_hdrs, txq->tso_hdrs_phys);
3065	if (txq->descs)
3066		dma_free_coherent(pp->dev->dev.parent,
3067				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
3068				  txq->descs, txq->descs_phys);
3069
3070	netdev_tx_reset_queue(nq);
3071
3072	txq->descs             = NULL;
3073	txq->last_desc         = 0;
3074	txq->next_desc_to_proc = 0;
3075	txq->descs_phys        = 0;
3076}
3077
3078static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3079				 struct mvneta_tx_queue *txq)
3080{
3081	/* Set minimum bandwidth for disabled TXQs */
3082	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3083	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3084
3085	/* Set Tx descriptors queue starting address and size */
3086	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3087	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3088}
3089
3090static void mvneta_txq_deinit(struct mvneta_port *pp,
3091			      struct mvneta_tx_queue *txq)
3092{
3093	mvneta_txq_sw_deinit(pp, txq);
3094	mvneta_txq_hw_deinit(pp, txq);
3095}
3096
3097/* Cleanup all Tx queues */
3098static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3099{
3100	int queue;
3101
3102	for (queue = 0; queue < txq_number; queue++)
3103		mvneta_txq_deinit(pp, &pp->txqs[queue]);
3104}
3105
3106/* Cleanup all Rx queues */
3107static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3108{
3109	int queue;
3110
3111	for (queue = 0; queue < rxq_number; queue++)
3112		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3113}
3114
3115
3116/* Init all Rx queues */
3117static int mvneta_setup_rxqs(struct mvneta_port *pp)
3118{
3119	int queue;
3120
3121	for (queue = 0; queue < rxq_number; queue++) {
3122		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3123
3124		if (err) {
3125			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3126				   __func__, queue);
3127			mvneta_cleanup_rxqs(pp);
3128			return err;
3129		}
3130	}
3131
3132	return 0;
3133}
3134
3135/* Init all tx queues */
3136static int mvneta_setup_txqs(struct mvneta_port *pp)
3137{
3138	int queue;
3139
3140	for (queue = 0; queue < txq_number; queue++) {
3141		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3142		if (err) {
3143			netdev_err(pp->dev, "%s: can't create txq=%d\n",
3144				   __func__, queue);
3145			mvneta_cleanup_txqs(pp);
3146			return err;
3147		}
3148	}
3149
3150	return 0;
3151}
3152
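/* Configure the common PHY (serdes lane), if any, for Ethernet in the
 * current interface mode and power it on; a missing comphy is not an
 * error.
 */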
3153static int mvneta_comphy_init(struct mvneta_port *pp)
3154{
3155	int ret;
3156
3157	if (!pp->comphy)
3158		return 0;
3159
3160	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
3161			       pp->phy_interface);
3162	if (ret)
3163		return ret;
3164
3165	return phy_power_on(pp->comphy);
3166}
3167
3168static void mvneta_start_dev(struct mvneta_port *pp)
3169{
3170	int cpu;
3171
3172	WARN_ON(mvneta_comphy_init(pp));
3173
3174	mvneta_max_rx_size_set(pp, pp->pkt_size);
3175	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3176
3177	/* start the Rx/Tx activity */
3178	mvneta_port_enable(pp);
3179
3180	if (!pp->neta_armada3700) {
3181		/* Enable polling on the port */
3182		for_each_online_cpu(cpu) {
3183			struct mvneta_pcpu_port *port =
3184				per_cpu_ptr(pp->ports, cpu);
3185
3186			napi_enable(&port->napi);
3187		}
3188	} else {
3189		napi_enable(&pp->napi);
3190	}
3191
3192	/* Unmask interrupts. It has to be done from each CPU */
3193	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3194
3195	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3196		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3197		    MVNETA_CAUSE_LINK_CHANGE);
3198
3199	phylink_start(pp->phylink);
3200	netif_tx_start_all_queues(pp->dev);
3201}
3202
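/* Tear down the running port: stop phylink and NAPI, disable the port,
 * mask and clear its interrupts, reset the RX/TX paths and power off
 * the comphy.
 */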
3203static void mvneta_stop_dev(struct mvneta_port *pp)
3204{
3205	unsigned int cpu;
3206
3207	phylink_stop(pp->phylink);
3208
3209	if (!pp->neta_armada3700) {
3210		for_each_online_cpu(cpu) {
3211			struct mvneta_pcpu_port *port =
3212				per_cpu_ptr(pp->ports, cpu);
3213
3214			napi_disable(&port->napi);
3215		}
3216	} else {
3217		napi_disable(&pp->napi);
3218	}
3219
3220	netif_carrier_off(pp->dev);
3221
3222	mvneta_port_down(pp);
3223	netif_tx_stop_all_queues(pp->dev);
3224
3225	/* Stop the port activity */
3226	mvneta_port_disable(pp);
3227
3228	/* Clear all ethernet port interrupts */
3229	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3230
3231	/* Mask all ethernet port interrupts */
3232	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3233
3234	mvneta_tx_reset(pp);
3235	mvneta_rx_reset(pp);
3236
3237	WARN_ON(phy_power_off(pp->comphy));
3238}
3239
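/* Enable/disable the per-CPU port interrupt on the local CPU; intended
 * to be run on each CPU via on_each_cpu() or an SMP function call.
 */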
3240static void mvneta_percpu_enable(void *arg)
3241{
3242	struct mvneta_port *pp = arg;
3243
3244	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3245}
3246
3247static void mvneta_percpu_disable(void *arg)
3248{
3249	struct mvneta_port *pp = arg;
3250
3251	disable_percpu_irq(pp->dev->irq);
3252}
3253
3254/* Change the device mtu */
3255static int mvneta_change_mtu(struct net_device *dev, int mtu)
3256{
3257	struct mvneta_port *pp = netdev_priv(dev);
3258	int ret;
3259
3260	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3261		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3262			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3263		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3264	}
3265
3266	dev->mtu = mtu;
3267
3268	if (!netif_running(dev)) {
3269		if (pp->bm_priv)
3270			mvneta_bm_update_mtu(pp, mtu);
3271
3272		netdev_update_features(dev);
3273		return 0;
3274	}
3275
3276	/* The interface is running, so we have to force a
3277	 * reallocation of the queues
3278	 */
3279	mvneta_stop_dev(pp);
3280	on_each_cpu(mvneta_percpu_disable, pp, true);
3281
3282	mvneta_cleanup_txqs(pp);
3283	mvneta_cleanup_rxqs(pp);
3284
3285	if (pp->bm_priv)
3286		mvneta_bm_update_mtu(pp, mtu);
3287
3288	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3289
3290	ret = mvneta_setup_rxqs(pp);
3291	if (ret) {
3292		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3293		return ret;
3294	}
3295
3296	ret = mvneta_setup_txqs(pp);
3297	if (ret) {
3298		netdev_err(dev, "unable to setup txqs after MTU change\n");
3299		return ret;
3300	}
3301
3302	on_each_cpu(mvneta_percpu_enable, pp, true);
3303	mvneta_start_dev(pp);
3304
3305	netdev_update_features(dev);
3306
3307	return 0;
3308}
3309
3310static netdev_features_t mvneta_fix_features(struct net_device *dev,
3311					     netdev_features_t features)
3312{
3313	struct mvneta_port *pp = netdev_priv(dev);
3314
3315	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3316		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3317		netdev_info(dev,
3318			    "Disable IP checksum for MTU greater than %dB\n",
3319			    pp->tx_csum_limit);
3320	}
3321
3322	return features;
3323}
3324
3325/* Get mac address */
3326static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3327{
3328	u32 mac_addr_l, mac_addr_h;
3329
3330	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3331	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3332	addr[0] = (mac_addr_h >> 24) & 0xFF;
3333	addr[1] = (mac_addr_h >> 16) & 0xFF;
3334	addr[2] = (mac_addr_h >> 8) & 0xFF;
3335	addr[3] = mac_addr_h & 0xFF;
3336	addr[4] = (mac_addr_l >> 8) & 0xFF;
3337	addr[5] = mac_addr_l & 0xFF;
3338}
3339
3340/* Handle setting mac address */
3341static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3342{
3343	struct mvneta_port *pp = netdev_priv(dev);
3344	struct sockaddr *sockaddr = addr;
3345	int ret;
3346
3347	ret = eth_prepare_mac_addr_change(dev, addr);
3348	if (ret < 0)
3349		return ret;
3350	/* Remove previous address table entry */
3351	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3352
3353	/* Set new addr in hw */
3354	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3355
3356	eth_commit_mac_addr_change(dev, addr);
3357	return 0;
3358}
3359
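/* phylink validate callback: reject interface modes the MAC cannot
 * handle and trim the supported/advertising link-mode masks to what the
 * GMAC (and optional comphy) can actually do.
 */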
3360static void mvneta_validate(struct phylink_config *config,
3361			    unsigned long *supported,
3362			    struct phylink_link_state *state)
3363{
3364	struct net_device *ndev = to_net_dev(config->dev);
3365	struct mvneta_port *pp = netdev_priv(ndev);
3366	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3367
3368	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3369	if (state->interface != PHY_INTERFACE_MODE_NA &&
3370	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
3371	    state->interface != PHY_INTERFACE_MODE_SGMII &&
3372	    !phy_interface_mode_is_8023z(state->interface) &&
3373	    !phy_interface_mode_is_rgmii(state->interface)) {
3374		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3375		return;
3376	}
3377
3378	/* Allow all the expected bits */
3379	phylink_set(mask, Autoneg);
3380	phylink_set_port_modes(mask);
3381
3382	/* Asymmetric pause is unsupported */
3383	phylink_set(mask, Pause);
3384
3385	/* Half-duplex at speeds higher than 100Mbit is unsupported */
3386	if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3387		phylink_set(mask, 1000baseT_Full);
3388		phylink_set(mask, 1000baseX_Full);
3389	}
3390	if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3391		phylink_set(mask, 2500baseT_Full);
3392		phylink_set(mask, 2500baseX_Full);
3393	}
3394
3395	if (!phy_interface_mode_is_8023z(state->interface)) {
3396		/* 10M and 100M are only supported in non-802.3z mode */
3397		phylink_set(mask, 10baseT_Half);
3398		phylink_set(mask, 10baseT_Full);
3399		phylink_set(mask, 100baseT_Half);
3400		phylink_set(mask, 100baseT_Full);
3401	}
3402
3403	bitmap_and(supported, supported, mask,
3404		   __ETHTOOL_LINK_MODE_MASK_NBITS);
3405	bitmap_and(state->advertising, state->advertising, mask,
3406		   __ETHTOOL_LINK_MODE_MASK_NBITS);
3407
3408	/* We can only operate at 2500BaseX or 1000BaseX.  If requested
3409	 * to advertise both, only report advertising at 2500BaseX.
3410	 */
3411	phylink_helper_basex_speed(state);
3412}
3413
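/* phylink callback: report the current link state (speed, duplex, pause
 * and autoneg status) as read back from the GMAC status register.
 */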
3414static int mvneta_mac_link_state(struct phylink_config *config,
3415				 struct phylink_link_state *state)
3416{
3417	struct net_device *ndev = to_net_dev(config->dev);
3418	struct mvneta_port *pp = netdev_priv(ndev);
3419	u32 gmac_stat;
3420
3421	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3422
3423	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3424		state->speed =
3425			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3426			SPEED_2500 : SPEED_1000;
3427	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3428		state->speed = SPEED_100;
3429	else
3430		state->speed = SPEED_10;
3431
3432	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3433	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3434	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3435
3436	state->pause = 0;
3437	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3438		state->pause |= MLO_PAUSE_RX;
3439	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3440		state->pause |= MLO_PAUSE_TX;
3441
3442	return 1;
3443}
3444
3445static void mvneta_mac_an_restart(struct phylink_config *config)
3446{
3447	struct net_device *ndev = to_net_dev(config->dev);
3448	struct mvneta_port *pp = netdev_priv(ndev);
3449	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3450
3451	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3452		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3453	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3454		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3455}
3456
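/* phylink .mac_config callback: rebuild the GMAC control, clock-divider
 * and autoneg register values for the requested mode (fixed/PHY, SGMII
 * in-band or 802.3z), forcing the link down while the port mode or
 * in-band enable bits change, and reconfigure the comphy if the
 * interface type changed.
 */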
3457static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3458			      const struct phylink_link_state *state)
3459{
3460	struct net_device *ndev = to_net_dev(config->dev);
3461	struct mvneta_port *pp = netdev_priv(ndev);
3462	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3463	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3464	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3465	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3466	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3467
3468	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3469	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3470				   MVNETA_GMAC2_PORT_RESET);
3471	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3472	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3473	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3474			     MVNETA_GMAC_INBAND_RESTART_AN |
3475			     MVNETA_GMAC_CONFIG_MII_SPEED |
3476			     MVNETA_GMAC_CONFIG_GMII_SPEED |
3477			     MVNETA_GMAC_AN_SPEED_EN |
3478			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3479			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
3480			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
3481			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
3482			     MVNETA_GMAC_AN_DUPLEX_EN);
3483
3484	/* Even though it might look weird, when we're configured in
3485	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3486	 */
3487	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3488
3489	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3490	    state->interface == PHY_INTERFACE_MODE_SGMII ||
3491	    phy_interface_mode_is_8023z(state->interface))
3492		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3493
3494	if (phylink_test(state->advertising, Pause))
3495		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3496	if (state->pause & MLO_PAUSE_TXRX_MASK)
3497		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3498
3499	if (!phylink_autoneg_inband(mode)) {
3500		/* Phy or fixed speed */
3501		if (state->duplex)
3502			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3503
3504		if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
3505			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3506		else if (state->speed == SPEED_100)
3507			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
3508	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3509		/* SGMII mode receives the state from the PHY */
3510		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3511		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3512		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3513				     MVNETA_GMAC_FORCE_LINK_PASS)) |
3514			 MVNETA_GMAC_INBAND_AN_ENABLE |
3515			 MVNETA_GMAC_AN_SPEED_EN |
3516			 MVNETA_GMAC_AN_DUPLEX_EN;
3517	} else {
3518		/* 802.3z negotiation - only 1000base-X */
3519		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3520		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3521		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3522				     MVNETA_GMAC_FORCE_LINK_PASS)) |
3523			 MVNETA_GMAC_INBAND_AN_ENABLE |
3524			 MVNETA_GMAC_CONFIG_GMII_SPEED |
3525			 /* The MAC only supports FD mode */
3526			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3527
3528		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3529			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3530	}
3531
3532	/* Armada 370 documentation says we can only change the port mode
3533	 * and in-band enable when the link is down, so force it down
3534	 * while making these changes. We also do this for GMAC_CTRL2 */
3535	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3536	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3537	    (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3538		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3539			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3540			    MVNETA_GMAC_FORCE_LINK_DOWN);
3541	}
3542
3543
3544	/* When at 2.5G, the link partner can send frames with shortened
3545	 * preambles.
3546	 */
3547	if (state->speed == SPEED_2500)
3548		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3549
3550	if (pp->comphy && pp->phy_interface != state->interface &&
3551	    (state->interface == PHY_INTERFACE_MODE_SGMII ||
3552	     state->interface == PHY_INTERFACE_MODE_1000BASEX ||
3553	     state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
3554		pp->phy_interface = state->interface;
3555
3556		WARN_ON(phy_power_off(pp->comphy));
3557		WARN_ON(mvneta_comphy_init(pp));
3558	}
3559
3560	if (new_ctrl0 != gmac_ctrl0)
3561		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
3562	if (new_ctrl2 != gmac_ctrl2)
3563		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
3564	if (new_ctrl4 != gmac_ctrl4)
3565		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
3566	if (new_clk != gmac_clk)
3567		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3568	if (new_an != gmac_an)
3569		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
3570
3571	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3572		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3573			MVNETA_GMAC2_PORT_RESET) != 0)
3574			continue;
3575	}
3576}
3577
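/* Enable or disable LPI (EEE) request generation in the MAC */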
3578static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3579{
3580	u32 lpi_ctl1;
3581
3582	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3583	if (enable)
3584		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3585	else
3586		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3587	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3588}
3589
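/* phylink .mac_link_down callback: take the port down, force the link
 * down for non in-band modes and stop requesting LPI.
 */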
3590static void mvneta_mac_link_down(struct phylink_config *config,
3591				 unsigned int mode, phy_interface_t interface)
3592{
3593	struct net_device *ndev = to_net_dev(config->dev);
3594	struct mvneta_port *pp = netdev_priv(ndev);
3595	u32 val;
3596
3597	mvneta_port_down(pp);
3598
3599	if (!phylink_autoneg_inband(mode)) {
3600		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3601		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3602		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3603		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3604	}
3605
3606	pp->eee_active = false;
3607	mvneta_set_eee(pp, false);
3608}
3609
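/* phylink .mac_link_up callback: force the link up for non in-band
 * modes, re-enable the port and, when a PHY is present, re-evaluate EEE.
 */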
3610static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
3611			       phy_interface_t interface,
3612			       struct phy_device *phy)
3613{
3614	struct net_device *ndev = to_net_dev(config->dev);
3615	struct mvneta_port *pp = netdev_priv(ndev);
3616	u32 val;
3617
3618	if (!phylink_autoneg_inband(mode)) {
3619		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3620		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3621		val |= MVNETA_GMAC_FORCE_LINK_PASS;
3622		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3623	}
3624
3625	mvneta_port_up(pp);
3626
3627	if (phy && pp->eee_enabled) {
3628		pp->eee_active = phy_init_eee(phy, 0) >= 0;
3629		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
3630	}
3631}
3632
3633static const struct phylink_mac_ops mvneta_phylink_ops = {
3634	.validate = mvneta_validate,
3635	.mac_link_state = mvneta_mac_link_state,
3636	.mac_an_restart = mvneta_mac_an_restart,
3637	.mac_config = mvneta_mac_config,
3638	.mac_link_down = mvneta_mac_link_down,
3639	.mac_link_up = mvneta_mac_link_up,
3640};
3641
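/* Connect phylink to the PHY described in the device tree and propagate
 * its Wake-on-LAN capability to the net_device.
 */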
3642static int mvneta_mdio_probe(struct mvneta_port *pp)
3643{
3644	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3645	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
3646
3647	if (err)
3648		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
3649
3650	phylink_ethtool_get_wol(pp->phylink, &wol);
3651	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3652
3653	return err;
3654}
3655
3656static void mvneta_mdio_remove(struct mvneta_port *pp)
3657{
3658	phylink_disconnect_phy(pp->phylink);
3659}
3660
3661/* Electing a CPU must be done in an atomic way: it should be done
3662 * either before or after the removal/insertion of a CPU, and this
3663 * function is not reentrant.
3664 */
3665static void mvneta_percpu_elect(struct mvneta_port *pp)
3666{
3667	int elected_cpu = 0, max_cpu, cpu, i = 0;
3668
3669	/* Use the CPU associated with the rxq when it is online; in all
3670	 * other cases, use CPU 0, which can't be offline.
3671	 */
3672	if (cpu_online(pp->rxq_def))
3673		elected_cpu = pp->rxq_def;
3674
3675	max_cpu = num_present_cpus();
3676
3677	for_each_online_cpu(cpu) {
3678		int rxq_map = 0, txq_map = 0;
3679		int rxq;
3680
3681		for (rxq = 0; rxq < rxq_number; rxq++)
3682			if ((rxq % max_cpu) == cpu)
3683				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3684
3685		if (cpu == elected_cpu)
3686			/* Map the default receive queue to the
3687			 * elected CPU
3688			 */
3689			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3690
3691		/* We update the TX queue map only if we have one
3692		 * queue. In this case we associate the TX queue to
3693		 * the CPU bound to the default RX queue
3694		 */
3695		if (txq_number == 1)
3696			txq_map = (cpu == elected_cpu) ?
3697				MVNETA_CPU_TXQ_ACCESS(1) : 0;
3698		else
3699			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3700				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3701
3702		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3703
3704		/* Update the interrupt mask on each CPU according the
3705		 * new mapping
3706		 */
3707		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3708					 pp, true);
3709		i++;
3710
3711	}
3712}
3713
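/* CPU hotplug "online" callback: quiesce the NAPI contexts of the other
 * CPUs, enable NAPI and the per-CPU interrupt on the new CPU, re-elect
 * the CPU handling the default RX queue and unmask the port interrupts.
 */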
3714static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3715{
3716	int other_cpu;
3717	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3718						  node_online);
3719	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3720
3721
3722	spin_lock(&pp->lock);
3723	/*
3724	 * Configuring the driver for a new CPU while the driver is
3725	 * stopping is racy, so just avoid it.
3726	 */
3727	if (pp->is_stopped) {
3728		spin_unlock(&pp->lock);
3729		return 0;
3730	}
3731	netif_tx_stop_all_queues(pp->dev);
3732
3733	/*
3734	 * We have to synchronise on the napi of each CPU except the one
3735	 * just being woken up.
3736	 */
3737	for_each_online_cpu(other_cpu) {
3738		if (other_cpu != cpu) {
3739			struct mvneta_pcpu_port *other_port =
3740				per_cpu_ptr(pp->ports, other_cpu);
3741
3742			napi_synchronize(&other_port->napi);
3743		}
3744	}
3745
3746	/* Mask all ethernet port interrupts */
3747	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3748	napi_enable(&port->napi);
3749
3750	/*
3751	 * Enable per-CPU interrupts on the CPU that is
3752	 * brought up.
3753	 */
3754	mvneta_percpu_enable(pp);
3755
3756	/*
3757	 * Re-elect the CPU handling the default RX queue and
3758	 * update the queue-to-CPU mapping.
3759	 */
3760	mvneta_percpu_elect(pp);
3761
3762	/* Unmask all ethernet port interrupts */
3763	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3764	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3765		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3766		    MVNETA_CAUSE_LINK_CHANGE);
3767	netif_tx_start_all_queues(pp->dev);
3768	spin_unlock(&pp->lock);
3769	return 0;
3770}
3771
3772static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3773{
3774	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3775						  node_online);
3776	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3777
3778	/*
3779	 * Thanks to this lock we are sure that any pending cpu election is
3780	 * done.
3781	 */
3782	spin_lock(&pp->lock);
3783	/* Mask all ethernet port interrupts */
3784	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3785	spin_unlock(&pp->lock);
3786
3787	napi_synchronize(&port->napi);
3788	napi_disable(&port->napi);
3789	/* Disable per-CPU interrupts on the CPU that is brought down. */
3790	mvneta_percpu_disable(pp);
3791	return 0;
3792}
3793
3794static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3795{
3796	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3797						  node_dead);
3798
3799	/* Check if a new CPU must be elected now that this one is down */
3800	spin_lock(&pp->lock);
3801	mvneta_percpu_elect(pp);
3802	spin_unlock(&pp->lock);
3803	/* Unmask all ethernet port interrupts */
3804	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3805	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3806		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3807		    MVNETA_CAUSE_LINK_CHANGE);
3808	netif_tx_start_all_queues(pp->dev);
3809	return 0;
3810}
3811
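/* ndo_open: allocate the RX/TX queues, request the port interrupt
 * (per-CPU except on Armada 3700), register the CPU hotplug callbacks,
 * connect the PHY and start the port.
 */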
3812static int mvneta_open(struct net_device *dev)
3813{
3814	struct mvneta_port *pp = netdev_priv(dev);
3815	int ret;
3816
3817	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3818
3819	ret = mvneta_setup_rxqs(pp);
3820	if (ret)
3821		return ret;
3822
3823	ret = mvneta_setup_txqs(pp);
3824	if (ret)
3825		goto err_cleanup_rxqs;
3826
3827	/* Connect to port interrupt line */
3828	if (pp->neta_armada3700)
3829		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3830				  dev->name, pp);
3831	else
3832		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3833					 dev->name, pp->ports);
3834	if (ret) {
3835		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3836		goto err_cleanup_txqs;
3837	}
3838
3839	if (!pp->neta_armada3700) {
3840		/* Enable the per-CPU interrupt on all CPUs to handle our RX
3841		 * queue interrupts
3842		 */
3843		on_each_cpu(mvneta_percpu_enable, pp, true);
3844
3845		pp->is_stopped = false;
3846		/* Register a CPU notifier to handle the case where our CPU
3847		 * might be taken offline.
3848		 */
3849		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3850						       &pp->node_online);
3851		if (ret)
3852			goto err_free_irq;
3853
3854		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3855						       &pp->node_dead);
3856		if (ret)
3857			goto err_free_online_hp;
3858	}
3859
3860	ret = mvneta_mdio_probe(pp);
3861	if (ret < 0) {
3862		netdev_err(dev, "cannot probe MDIO bus\n");
3863		goto err_free_dead_hp;
3864	}
3865
3866	mvneta_start_dev(pp);
3867
3868	return 0;
3869
3870err_free_dead_hp:
3871	if (!pp->neta_armada3700)
3872		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3873						    &pp->node_dead);
3874err_free_online_hp:
3875	if (!pp->neta_armada3700)
3876		cpuhp_state_remove_instance_nocalls(online_hpstate,
3877						    &pp->node_online);
3878err_free_irq:
3879	if (pp->neta_armada3700) {
3880		free_irq(pp->dev->irq, pp);
3881	} else {
3882		on_each_cpu(mvneta_percpu_disable, pp, true);
3883		free_percpu_irq(pp->dev->irq, pp->ports);
3884	}
3885err_cleanup_txqs:
3886	mvneta_cleanup_txqs(pp);
3887err_cleanup_rxqs:
3888	mvneta_cleanup_rxqs(pp);
3889	return ret;
3890}
3891
3892/* Stop the port, free port interrupt line */
3893static int mvneta_stop(struct net_device *dev)
3894{
3895	struct mvneta_port *pp = netdev_priv(dev);
3896
3897	if (!pp->neta_armada3700) {
3898		/* Inform the notifiers that we are stopping, so we don't
3899		 * set up the driver for new CPUs. The CPU-online notifier
3900		 * code is protected by the same spinlock, so when we get
3901		 * the lock, the notifier work is done.
3902		 */
3903		spin_lock(&pp->lock);
3904		pp->is_stopped = true;
3905		spin_unlock(&pp->lock);
3906
3907		mvneta_stop_dev(pp);
3908		mvneta_mdio_remove(pp);
3909
3910		cpuhp_state_remove_instance_nocalls(online_hpstate,
3911						    &pp->node_online);
3912		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3913						    &pp->node_dead);
3914		on_each_cpu(mvneta_percpu_disable, pp, true);
3915		free_percpu_irq(dev->irq, pp->ports);
3916	} else {
3917		mvneta_stop_dev(pp);
3918		mvneta_mdio_remove(pp);
3919		free_irq(dev->irq, pp);
3920	}
3921
3922	mvneta_cleanup_rxqs(pp);
3923	mvneta_cleanup_txqs(pp);
3924
3925	return 0;
3926}
3927
3928static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3929{
3930	struct mvneta_port *pp = netdev_priv(dev);
3931
3932	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
3933}
3934
3935/* Ethtool methods */
3936
3937/* Set link ksettings (phy address, speed) for ethtools */
3938static int
3939mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3940				  const struct ethtool_link_ksettings *cmd)
3941{
3942	struct mvneta_port *pp = netdev_priv(ndev);
3943
3944	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
3945}
3946
3947/* Get link ksettings for ethtools */
3948static int
3949mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
3950				  struct ethtool_link_ksettings *cmd)
3951{
3952	struct mvneta_port *pp = netdev_priv(ndev);
3953
3954	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
3955}
3956
3957static int mvneta_ethtool_nway_reset(struct net_device *dev)
3958{
3959	struct mvneta_port *pp = netdev_priv(dev);
3960
3961	return phylink_ethtool_nway_reset(pp->phylink);
3962}
3963
3964/* Set interrupt coalescing for ethtools */
3965static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3966				       struct ethtool_coalesce *c)
3967{
3968	struct mvneta_port *pp = netdev_priv(dev);
3969	int queue;
3970
3971	for (queue = 0; queue < rxq_number; queue++) {
3972		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3973		rxq->time_coal = c->rx_coalesce_usecs;
3974		rxq->pkts_coal = c->rx_max_coalesced_frames;
3975		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3976		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3977	}
3978
3979	for (queue = 0; queue < txq_number; queue++) {
3980		struct mvneta_tx_queue *txq = &pp->txqs[queue];
3981		txq->done_pkts_coal = c->tx_max_coalesced_frames;
3982		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3983	}
3984
3985	return 0;
3986}
3987
3988/* get coalescing for ethtools */
3989static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3990				       struct ethtool_coalesce *c)
3991{
3992	struct mvneta_port *pp = netdev_priv(dev);
3993
3994	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
3995	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
3996
3997	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
3998	return 0;
3999}
4000
4001
4002static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4003				    struct ethtool_drvinfo *drvinfo)
4004{
4005	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4006		sizeof(drvinfo->driver));
4007	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4008		sizeof(drvinfo->version));
4009	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4010		sizeof(drvinfo->bus_info));
4011}
4012
4013
4014static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
4015					 struct ethtool_ringparam *ring)
4016{
4017	struct mvneta_port *pp = netdev_priv(netdev);
4018
4019	ring->rx_max_pending = MVNETA_MAX_RXD;
4020	ring->tx_max_pending = MVNETA_MAX_TXD;
4021	ring->rx_pending = pp->rx_ring_size;
4022	ring->tx_pending = pp->tx_ring_size;
4023}
4024
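/* Change the RX/TX ring sizes; if the interface is running it is
 * stopped and reopened so the rings are reallocated with the new sizes.
 */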
4025static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4026					struct ethtool_ringparam *ring)
4027{
4028	struct mvneta_port *pp = netdev_priv(dev);
4029
4030	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4031		return -EINVAL;
4032	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4033		ring->rx_pending : MVNETA_MAX_RXD;
4034
4035	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4036				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4037	if (pp->tx_ring_size != ring->tx_pending)
4038		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4039			    pp->tx_ring_size, ring->tx_pending);
4040
4041	if (netif_running(dev)) {
4042		mvneta_stop(dev);
4043		if (mvneta_open(dev)) {
4044			netdev_err(dev,
4045				   "error on opening device after ring param change\n");
4046			return -ENOMEM;
4047		}
4048	}
4049
4050	return 0;
4051}
4052
4053static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4054					  struct ethtool_pauseparam *pause)
4055{
4056	struct mvneta_port *pp = netdev_priv(dev);
4057
4058	phylink_ethtool_get_pauseparam(pp->phylink, pause);
4059}
4060
4061static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4062					 struct ethtool_pauseparam *pause)
4063{
4064	struct mvneta_port *pp = netdev_priv(dev);
4065
4066	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4067}
4068
4069static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4070				       u8 *data)
4071{
4072	if (sset == ETH_SS_STATS) {
4073		int i;
4074
4075		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4076			memcpy(data + i * ETH_GSTRING_LEN,
4077			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
4078	}
4079}
4080
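/* Accumulate the ethtool statistics: read each 32-bit, 64-bit or
 * software-maintained counter and add it to the running totals in
 * pp->ethtool_stats[].
 */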
4081static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4082{
4083	const struct mvneta_statistic *s;
4084	void __iomem *base = pp->base;
4085	u32 high, low;
4086	u64 val;
4087	int i;
4088
4089	for (i = 0, s = mvneta_statistics;
4090	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4091	     s++, i++) {
4092		val = 0;
4093
4094		switch (s->type) {
4095		case T_REG_32:
4096			val = readl_relaxed(base + s->offset);
4097			break;
4098		case T_REG_64:
4099			/* Docs say to read low 32-bit then high */
4100			low = readl_relaxed(base + s->offset);
4101			high = readl_relaxed(base + s->offset + 4);
4102			val = (u64)high << 32 | low;
4103			break;
4104		case T_SW:
4105			switch (s->offset) {
4106			case ETHTOOL_STAT_EEE_WAKEUP:
4107				val = phylink_get_eee_err(pp->phylink);
4108				break;
4109			case ETHTOOL_STAT_SKB_ALLOC_ERR:
4110				val = pp->rxqs[0].skb_alloc_err;
4111				break;
4112			case ETHTOOL_STAT_REFILL_ERR:
4113				val = pp->rxqs[0].refill_err;
4114				break;
4115			}
4116			break;
4117		}
4118
4119		pp->ethtool_stats[i] += val;
4120	}
4121}
4122
4123static void mvneta_ethtool_get_stats(struct net_device *dev,
4124				     struct ethtool_stats *stats, u64 *data)
4125{
4126	struct mvneta_port *pp = netdev_priv(dev);
4127	int i;
4128
4129	mvneta_ethtool_update_stats(pp);
4130
4131	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4132		*data++ = pp->ethtool_stats[i];
4133}
4134
4135static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4136{
4137	if (sset == ETH_SS_STATS)
4138		return ARRAY_SIZE(mvneta_statistics);
4139	return -EOPNOTSUPP;
4140}
4141
4142static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4143{
4144	return MVNETA_RSS_LU_TABLE_SIZE;
4145}
4146
4147static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4148				    struct ethtool_rxnfc *info,
4149				    u32 *rules __always_unused)
4150{
4151	switch (info->cmd) {
4152	case ETHTOOL_GRXRINGS:
4153		info->data =  rxq_number;
4154		return 0;
4155	case ETHTOOL_GRXFH:
4156		return -EOPNOTSUPP;
4157	default:
4158		return -EOPNOTSUPP;
4159	}
4160}
4161
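/* Apply a new RSS indirection table: quiesce NAPI on every CPU, switch
 * the default RX queue to the first indirection entry, update the
 * unicast mapping and the elected CPU, then re-enable NAPI and TX.
 */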
4162static int  mvneta_config_rss(struct mvneta_port *pp)
4163{
4164	int cpu;
4165	u32 val;
4166
4167	netif_tx_stop_all_queues(pp->dev);
4168
4169	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4170
4171	if (!pp->neta_armada3700) {
4172		/* We have to synchronise on the napi of each CPU */
4173		for_each_online_cpu(cpu) {
4174			struct mvneta_pcpu_port *pcpu_port =
4175				per_cpu_ptr(pp->ports, cpu);
4176
4177			napi_synchronize(&pcpu_port->napi);
4178			napi_disable(&pcpu_port->napi);
4179		}
4180	} else {
4181		napi_synchronize(&pp->napi);
4182		napi_disable(&pp->napi);
4183	}
4184
4185	pp->rxq_def = pp->indir[0];
4186
4187	/* Update unicast mapping */
4188	mvneta_set_rx_mode(pp->dev);
4189
4190	/* Update the portCfg register so all RX queue types use the new default queue */
4191	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4192	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4193
4194	/* Update the elected CPU matching the new rxq_def */
4195	spin_lock(&pp->lock);
4196	mvneta_percpu_elect(pp);
4197	spin_unlock(&pp->lock);
4198
4199	if (!pp->neta_armada3700) {
4200		/* We have to synchronise on the napi of each CPU */
4201		for_each_online_cpu(cpu) {
4202			struct mvneta_pcpu_port *pcpu_port =
4203				per_cpu_ptr(pp->ports, cpu);
4204
4205			napi_enable(&pcpu_port->napi);
4206		}
4207	} else {
4208		napi_enable(&pp->napi);
4209	}
4210
4211	netif_tx_start_all_queues(pp->dev);
4212
4213	return 0;
4214}
4215
4216static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4217				   const u8 *key, const u8 hfunc)
4218{
4219	struct mvneta_port *pp = netdev_priv(dev);
4220
4221	/* Current code for Armada 3700 doesn't support RSS features yet */
4222	if (pp->neta_armada3700)
4223		return -EOPNOTSUPP;
4224
4225	/* We require at least one supported parameter to be changed
4226	 * and no change in any of the unsupported parameters
4227	 */
4228	if (key ||
4229	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4230		return -EOPNOTSUPP;
4231
4232	if (!indir)
4233		return 0;
4234
4235	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4236
4237	return mvneta_config_rss(pp);
4238}
4239
4240static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4241				   u8 *hfunc)
4242{
4243	struct mvneta_port *pp = netdev_priv(dev);
4244
4245	/* Current code for Armada 3700 doesn't support RSS features yet */
4246	if (pp->neta_armada3700)
4247		return -EOPNOTSUPP;
4248
4249	if (hfunc)
4250		*hfunc = ETH_RSS_HASH_TOP;
4251
4252	if (!indir)
4253		return 0;
4254
4255	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4256
4257	return 0;
4258}
4259
4260static void mvneta_ethtool_get_wol(struct net_device *dev,
4261				   struct ethtool_wolinfo *wol)
4262{
4263	struct mvneta_port *pp = netdev_priv(dev);
4264
4265	phylink_ethtool_get_wol(pp->phylink, wol);
4266}
4267
4268static int mvneta_ethtool_set_wol(struct net_device *dev,
4269				  struct ethtool_wolinfo *wol)
4270{
4271	struct mvneta_port *pp = netdev_priv(dev);
4272	int ret;
4273
4274	ret = phylink_ethtool_set_wol(pp->phylink, wol);
4275	if (!ret)
4276		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4277
4278	return ret;
4279}
4280
4281static int mvneta_ethtool_get_eee(struct net_device *dev,
4282				  struct ethtool_eee *eee)
4283{
4284	struct mvneta_port *pp = netdev_priv(dev);
4285	u32 lpi_ctl0;
4286
4287	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4288
4289	eee->eee_enabled = pp->eee_enabled;
4290	eee->eee_active = pp->eee_active;
4291	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4292	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4293
4294	return phylink_ethtool_get_eee(pp->phylink, eee);
4295}
4296
4297static int mvneta_ethtool_set_eee(struct net_device *dev,
4298				  struct ethtool_eee *eee)
4299{
4300	struct mvneta_port *pp = netdev_priv(dev);
4301	u32 lpi_ctl0;
4302
4303	/* The Armada 37x documents do not give limits for this other than
4304	 * it being an 8-bit register. */
4305	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4306		return -EINVAL;
4307
4308	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4309	lpi_ctl0 &= ~(0xff << 8);
4310	lpi_ctl0 |= eee->tx_lpi_timer << 8;
4311	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4312
4313	pp->eee_enabled = eee->eee_enabled;
4314	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4315
4316	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4317
4318	return phylink_ethtool_set_eee(pp->phylink, eee);
4319}
4320
4321static const struct net_device_ops mvneta_netdev_ops = {
4322	.ndo_open            = mvneta_open,
4323	.ndo_stop            = mvneta_stop,
4324	.ndo_start_xmit      = mvneta_tx,
4325	.ndo_set_rx_mode     = mvneta_set_rx_mode,
4326	.ndo_set_mac_address = mvneta_set_mac_addr,
4327	.ndo_change_mtu      = mvneta_change_mtu,
4328	.ndo_fix_features    = mvneta_fix_features,
4329	.ndo_get_stats64     = mvneta_get_stats64,
4330	.ndo_do_ioctl        = mvneta_ioctl,
 
 
 
4331};
4332
4333static const struct ethtool_ops mvneta_eth_tool_ops = {
 
 
4334	.nway_reset	= mvneta_ethtool_nway_reset,
4335	.get_link       = ethtool_op_get_link,
4336	.set_coalesce   = mvneta_ethtool_set_coalesce,
4337	.get_coalesce   = mvneta_ethtool_get_coalesce,
4338	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
4339	.get_ringparam  = mvneta_ethtool_get_ringparam,
4340	.set_ringparam	= mvneta_ethtool_set_ringparam,
4341	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
4342	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
4343	.get_strings	= mvneta_ethtool_get_strings,
4344	.get_ethtool_stats = mvneta_ethtool_get_stats,
4345	.get_sset_count	= mvneta_ethtool_get_sset_count,
4346	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
4347	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
4348	.get_rxfh	= mvneta_ethtool_get_rxfh,
4349	.set_rxfh	= mvneta_ethtool_set_rxfh,
4350	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
4351	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
4352	.get_wol        = mvneta_ethtool_get_wol,
4353	.set_wol        = mvneta_ethtool_set_wol,
4354	.get_eee	= mvneta_ethtool_get_eee,
4355	.set_eee	= mvneta_ethtool_set_eee,
4356};
4357
4358/* Initialize hw */
4359static int mvneta_init(struct device *dev, struct mvneta_port *pp)
4360{
4361	int queue;
4362
4363	/* Disable port */
4364	mvneta_port_disable(pp);
4365
4366	/* Set port default values */
4367	mvneta_defaults_set(pp);
4368
4369	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4370	if (!pp->txqs)
4371		return -ENOMEM;
4372
4373	/* Initialize TX descriptor rings */
4374	for (queue = 0; queue < txq_number; queue++) {
4375		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4376		txq->id = queue;
4377		txq->size = pp->tx_ring_size;
4378		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4379	}
4380
4381	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4382	if (!pp->rxqs)
4383		return -ENOMEM;
4384
4385	/* Create Rx descriptor rings */
4386	for (queue = 0; queue < rxq_number; queue++) {
4387		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4388		rxq->id = queue;
4389		rxq->size = pp->rx_ring_size;
4390		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4391		rxq->time_coal = MVNETA_RX_COAL_USEC;
4392		rxq->buf_virt_addr
4393			= devm_kmalloc_array(pp->dev->dev.parent,
4394					     rxq->size,
4395					     sizeof(*rxq->buf_virt_addr),
4396					     GFP_KERNEL);
4397		if (!rxq->buf_virt_addr)
4398			return -ENOMEM;
4399	}
4400
4401	return 0;
4402}
4403
4404/* platform glue : initialize decoding windows */
4405static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4406				     const struct mbus_dram_target_info *dram)
4407{
4408	u32 win_enable;
4409	u32 win_protect;
4410	int i;
4411
4412	for (i = 0; i < 6; i++) {
4413		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4414		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4415
4416		if (i < 4)
4417			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4418	}
4419
4420	win_enable = 0x3f;
4421	win_protect = 0;
4422
4423	if (dram) {
4424		for (i = 0; i < dram->num_cs; i++) {
4425			const struct mbus_dram_window *cs = dram->cs + i;
4426
4427			mvreg_write(pp, MVNETA_WIN_BASE(i),
4428				    (cs->base & 0xffff0000) |
4429				    (cs->mbus_attr << 8) |
4430				    dram->mbus_dram_target_id);
4431
4432			mvreg_write(pp, MVNETA_WIN_SIZE(i),
4433				    (cs->size - 1) & 0xffff0000);
4434
4435			win_enable &= ~(1 << i);
4436			win_protect |= 3 << (2 * i);
4437		}
4438	} else {
 
 
 
 
4439		/* For Armada3700 open default 4GB Mbus window, leaving
4440		 * arbitration of target/attribute to a different layer
4441		 * of configuration.
4442		 */
4443		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4444		win_enable &= ~BIT(0);
4445		win_protect = 3;
4446	}
4447
4448	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4449	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4450}
4451
4452/* Power up the port */
4453static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4454{
4455	/* MAC Cause register should be cleared */
4456	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4457
4458	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
4459		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4460	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
4461		 phy_interface_mode_is_8023z(phy_mode))
4462		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4463	else if (!phy_interface_mode_is_rgmii(phy_mode))
4464		return -EINVAL;
4465
4466	return 0;
4467}
4468
4469/* Device initialization routine */
4470static int mvneta_probe(struct platform_device *pdev)
4471{
4472	struct device_node *dn = pdev->dev.of_node;
4473	struct device_node *bm_node;
4474	struct mvneta_port *pp;
4475	struct net_device *dev;
4476	struct phylink *phylink;
4477	struct phy *comphy;
4478	const char *dt_mac_addr;
4479	char hw_mac_addr[ETH_ALEN];
4480	const char *mac_from;
4481	int tx_csum_limit;
4482	int phy_mode;
4483	int err;
4484	int cpu;
4485
4486	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
4487				      txq_number, rxq_number);
4488	if (!dev)
4489		return -ENOMEM;
4490
4491	dev->irq = irq_of_parse_and_map(dn, 0);
4492	if (dev->irq == 0)
4493		return -EINVAL;
4494
4495	phy_mode = of_get_phy_mode(dn);
4496	if (phy_mode < 0) {
4497		dev_err(&pdev->dev, "incorrect phy-mode\n");
4498		err = -EINVAL;
4499		goto err_free_irq;
4500	}
4501
4502	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
4503	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
4504		err = -EPROBE_DEFER;
4505		goto err_free_irq;
4506	} else if (IS_ERR(comphy)) {
4507		comphy = NULL;
4508	}
4509
4510	pp = netdev_priv(dev);
4511	spin_lock_init(&pp->lock);
4512
4513	pp->phylink_config.dev = &dev->dev;
4514	pp->phylink_config.type = PHYLINK_NETDEV;
4515
4516	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
4517				 phy_mode, &mvneta_phylink_ops);
4518	if (IS_ERR(phylink)) {
4519		err = PTR_ERR(phylink);
4520		goto err_free_irq;
4521	}
4522
4523	dev->tx_queue_len = MVNETA_MAX_TXD;
4524	dev->watchdog_timeo = 5 * HZ;
4525	dev->netdev_ops = &mvneta_netdev_ops;
4526
4527	dev->ethtool_ops = &mvneta_eth_tool_ops;
4528
4529	pp->phylink = phylink;
4530	pp->comphy = comphy;
4531	pp->phy_interface = phy_mode;
4532	pp->dn = dn;
4533
4534	pp->rxq_def = rxq_def;
4535	pp->indir[0] = rxq_def;
4536
4537	/* Get special SoC configurations */
4538	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4539		pp->neta_armada3700 = true;
4540
4541	pp->clk = devm_clk_get(&pdev->dev, "core");
4542	if (IS_ERR(pp->clk))
4543		pp->clk = devm_clk_get(&pdev->dev, NULL);
4544	if (IS_ERR(pp->clk)) {
4545		err = PTR_ERR(pp->clk);
4546		goto err_free_phylink;
4547	}
4548
4549	clk_prepare_enable(pp->clk);
4550
4551	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4552	if (!IS_ERR(pp->clk_bus))
4553		clk_prepare_enable(pp->clk_bus);
4554
4555	pp->base = devm_platform_ioremap_resource(pdev, 0);
4556	if (IS_ERR(pp->base)) {
4557		err = PTR_ERR(pp->base);
4558		goto err_clk;
4559	}
4560
4561	/* Alloc per-cpu port structure */
4562	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4563	if (!pp->ports) {
4564		err = -ENOMEM;
4565		goto err_clk;
4566	}
4567
4568	/* Alloc per-cpu stats */
4569	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4570	if (!pp->stats) {
4571		err = -ENOMEM;
4572		goto err_free_ports;
4573	}
4574
4575	dt_mac_addr = of_get_mac_address(dn);
4576	if (!IS_ERR(dt_mac_addr)) {
4577		mac_from = "device tree";
4578		ether_addr_copy(dev->dev_addr, dt_mac_addr);
4579	} else {
4580		mvneta_get_mac_addr(pp, hw_mac_addr);
4581		if (is_valid_ether_addr(hw_mac_addr)) {
4582			mac_from = "hardware";
4583			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4584		} else {
4585			mac_from = "random";
4586			eth_hw_addr_random(dev);
4587		}
4588	}
4589
4590	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4591		if (tx_csum_limit < 0 ||
4592		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4593			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4594			dev_info(&pdev->dev,
4595				 "Wrong TX csum limit in DT, set to %dB\n",
4596				 MVNETA_TX_CSUM_DEF_SIZE);
4597		}
4598	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4599		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4600	} else {
4601		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4602	}
4603
4604	pp->tx_csum_limit = tx_csum_limit;
4605
4606	pp->dram_target_info = mv_mbus_dram_info();
4607	/* Armada3700 requires setting a default Mbus window
4608	 * configuration, but without using a filled
4609	 * mbus_dram_target_info structure.
4610	 */
4611	if (pp->dram_target_info || pp->neta_armada3700)
4612		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4613
4614	pp->tx_ring_size = MVNETA_MAX_TXD;
4615	pp->rx_ring_size = MVNETA_MAX_RXD;
4616
4617	pp->dev = dev;
4618	SET_NETDEV_DEV(dev, &pdev->dev);
4619
4620	pp->id = global_port_id++;
4621	pp->rx_offset_correction = 0; /* not relevant for SW BM */
4622
4623	/* Obtain access to BM resources if enabled and already initialized */
4624	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4625	if (bm_node) {
4626		pp->bm_priv = mvneta_bm_get(bm_node);
4627		if (pp->bm_priv) {
4628			err = mvneta_bm_port_init(pdev, pp);
4629			if (err < 0) {
4630				dev_info(&pdev->dev,
4631					 "use SW buffer management\n");
4632				mvneta_bm_put(pp->bm_priv);
4633				pp->bm_priv = NULL;
4634			}
4635		}
4636		/* Set RX packet offset correction for platforms whose
4637		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
4638		 * platforms and 0B for 32-bit ones.
4639		 */
4640		pp->rx_offset_correction = max(0,
4641					       NET_SKB_PAD -
4642					       MVNETA_RX_PKT_OFFSET_CORRECTION);
4643	}
4644	of_node_put(bm_node);
4645
4646	err = mvneta_init(&pdev->dev, pp);
4647	if (err < 0)
4648		goto err_netdev;
4649
4650	err = mvneta_port_power_up(pp, phy_mode);
4651	if (err < 0) {
4652		dev_err(&pdev->dev, "can't power up port\n");
4653		goto err_netdev;
4654	}
4655
4656	/* Armada3700 network controller does not support per-cpu
4657	 * operation, so only a single NAPI should be initialized.
4658	 */
4659	if (pp->neta_armada3700) {
4660		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4661	} else {
4662		for_each_present_cpu(cpu) {
4663			struct mvneta_pcpu_port *port =
4664				per_cpu_ptr(pp->ports, cpu);
4665
4666			netif_napi_add(dev, &port->napi, mvneta_poll,
4667				       NAPI_POLL_WEIGHT);
4668			port->pp = pp;
4669		}
4670	}
4671
4672	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4673			NETIF_F_TSO | NETIF_F_RXCSUM;
4674	dev->hw_features |= dev->features;
4675	dev->vlan_features |= dev->features;
4676	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4677	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4678
4679	/* MTU range: 68 - 9676 */
4680	dev->min_mtu = ETH_MIN_MTU;
4681	/* 9676 == 9700 - 20 and rounding to 8 */
4682	dev->max_mtu = 9676;
4683
4684	err = register_netdev(dev);
4685	if (err < 0) {
4686		dev_err(&pdev->dev, "failed to register\n");
4687		goto err_netdev;
4688	}
4689
4690	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4691		    dev->dev_addr);
4692
4693	platform_set_drvdata(pdev, pp->dev);
4694
4695	return 0;
4696
4697err_netdev:
4698	if (pp->bm_priv) {
4699		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4700		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4701				       1 << pp->id);
4702		mvneta_bm_put(pp->bm_priv);
4703	}
4704	free_percpu(pp->stats);
4705err_free_ports:
4706	free_percpu(pp->ports);
4707err_clk:
4708	clk_disable_unprepare(pp->clk_bus);
4709	clk_disable_unprepare(pp->clk);
4710err_free_phylink:
4711	if (pp->phylink)
4712		phylink_destroy(pp->phylink);
4713err_free_irq:
4714	irq_dispose_mapping(dev->irq);
4715	return err;
4716}
4717
4718/* Device removal routine */
4719static int mvneta_remove(struct platform_device *pdev)
4720{
4721	struct net_device  *dev = platform_get_drvdata(pdev);
4722	struct mvneta_port *pp = netdev_priv(dev);
4723
4724	unregister_netdev(dev);
4725	clk_disable_unprepare(pp->clk_bus);
4726	clk_disable_unprepare(pp->clk);
4727	free_percpu(pp->ports);
4728	free_percpu(pp->stats);
4729	irq_dispose_mapping(dev->irq);
4730	phylink_destroy(pp->phylink);
4731
4732	if (pp->bm_priv) {
4733		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4734		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4735				       1 << pp->id);
4736		mvneta_bm_put(pp->bm_priv);
4737	}
4738
4739	return 0;
4740}
4741
4742#ifdef CONFIG_PM_SLEEP
4743static int mvneta_suspend(struct device *device)
4744{
4745	int queue;
4746	struct net_device *dev = dev_get_drvdata(device);
4747	struct mvneta_port *pp = netdev_priv(dev);
4748
4749	if (!netif_running(dev))
4750		goto clean_exit;
4751
4752	if (!pp->neta_armada3700) {
4753		spin_lock(&pp->lock);
4754		pp->is_stopped = true;
4755		spin_unlock(&pp->lock);
4756
4757		cpuhp_state_remove_instance_nocalls(online_hpstate,
4758						    &pp->node_online);
4759		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4760						    &pp->node_dead);
4761	}
4762
4763	rtnl_lock();
4764	mvneta_stop_dev(pp);
4765	rtnl_unlock();
4766
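	/* The port is stopped; drop anything still queued in the RX
	 * queues and tear down the TX queue hardware state.  Both are
	 * rebuilt from scratch in mvneta_resume().
	 */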
4767	for (queue = 0; queue < rxq_number; queue++) {
4768		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4769
4770		mvneta_rxq_drop_pkts(pp, rxq);
4771	}
4772
4773	for (queue = 0; queue < txq_number; queue++) {
4774		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4775
4776		mvneta_txq_hw_deinit(pp, txq);
4777	}
4778
4779clean_exit:
4780	netif_device_detach(dev);
4781	clk_disable_unprepare(pp->clk_bus);
4782	clk_disable_unprepare(pp->clk);
4783
4784	return 0;
4785}
4786
4787static int mvneta_resume(struct device *device)
4788{
4789	struct platform_device *pdev = to_platform_device(device);
4790	struct net_device *dev = dev_get_drvdata(device);
4791	struct mvneta_port *pp = netdev_priv(dev);
4792	int err, queue;
4793
4794	clk_prepare_enable(pp->clk);
4795	if (!IS_ERR(pp->clk_bus))
4796		clk_prepare_enable(pp->clk_bus);
4797	if (pp->dram_target_info || pp->neta_armada3700)
4798		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4799	if (pp->bm_priv) {
4800		err = mvneta_bm_port_init(pdev, pp);
4801		if (err < 0) {
4802			dev_info(&pdev->dev, "use SW buffer management\n");
4803			pp->bm_priv = NULL;
4804		}
4805	}
4806	mvneta_defaults_set(pp);
4807	err = mvneta_port_power_up(pp, pp->phy_interface);
4808	if (err < 0) {
4809		dev_err(device, "can't power up port\n");
4810		return err;
4811	}
4812
4813	netif_device_attach(dev);
4814
4815	if (!netif_running(dev))
4816		return 0;
4817
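	/* Queue hardware state was lost across suspend: reset the
	 * software descriptor indices and reprogram every RX and TX
	 * queue before restarting the device.
	 */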
4818	for (queue = 0; queue < rxq_number; queue++) {
4819		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4820
4821		rxq->next_desc_to_proc = 0;
4822		mvneta_rxq_hw_init(pp, rxq);
4823	}
4824
4825	for (queue = 0; queue < txq_number; queue++) {
4826		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4827
4828		txq->next_desc_to_proc = 0;
4829		mvneta_txq_hw_init(pp, txq);
4830	}
4831
4832	if (!pp->neta_armada3700) {
4833		spin_lock(&pp->lock);
4834		pp->is_stopped = false;
4835		spin_unlock(&pp->lock);
4836		cpuhp_state_add_instance_nocalls(online_hpstate,
4837						 &pp->node_online);
4838		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4839						 &pp->node_dead);
4840	}
4841
4842	rtnl_lock();
4843	mvneta_start_dev(pp);
4844	rtnl_unlock();
4845	mvneta_set_rx_mode(dev);
4846
4847	return 0;
4848}
4849#endif
4850
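/* System sleep hooks; SIMPLE_DEV_PM_OPS leaves these ops empty when
 * CONFIG_PM_SLEEP is not enabled, matching the #ifdef guard above.
 */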
4851static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4852
4853static const struct of_device_id mvneta_match[] = {
4854	{ .compatible = "marvell,armada-370-neta" },
4855	{ .compatible = "marvell,armada-xp-neta" },
4856	{ .compatible = "marvell,armada-3700-neta" },
4857	{ }
4858};
4859MODULE_DEVICE_TABLE(of, mvneta_match);
4860
4861static struct platform_driver mvneta_driver = {
4862	.probe = mvneta_probe,
4863	.remove = mvneta_remove,
4864	.driver = {
4865		.name = MVNETA_DRIVER_NAME,
4866		.of_match_table = mvneta_match,
4867		.pm = &mvneta_pm_ops,
4868	},
4869};
4870
4871static int __init mvneta_driver_init(void)
4872{
4873	int ret;
4874
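	/* Register two CPU hotplug multi-instance states: a dynamically
	 * allocated "online" state whose callbacks run as CPUs come up or
	 * prepare to go down, and a "dead" state whose callback runs once
	 * a CPU is fully offline.  Per-port instances are attached
	 * elsewhere (see for example the *_add_instance_nocalls() calls
	 * in mvneta_resume() above).
	 */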
4875	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
4876				      mvneta_cpu_online,
4877				      mvneta_cpu_down_prepare);
4878	if (ret < 0)
4879		goto out;
4880	online_hpstate = ret;
4881	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4882				      NULL, mvneta_cpu_dead);
4883	if (ret)
4884		goto err_dead;
4885
4886	ret = platform_driver_register(&mvneta_driver);
4887	if (ret)
4888		goto err;
4889	return 0;
4890
4891err:
4892	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4893err_dead:
4894	cpuhp_remove_multi_state(online_hpstate);
4895out:
4896	return ret;
4897}
4898module_init(mvneta_driver_init);
4899
4900static void __exit mvneta_driver_exit(void)
4901{
4902	platform_driver_unregister(&mvneta_driver);
4903	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4904	cpuhp_remove_multi_state(online_hpstate);
4905}
4906module_exit(mvneta_driver_exit);
4907
4908MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4909MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4910MODULE_LICENSE("GPL");
4911
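/* Module parameters: rxq_number/txq_number select how many RX/TX queues
 * the driver uses, rxq_def picks the default RX queue, and rx_copybreak
 * (writable at runtime) is a copy-break threshold in bytes: received
 * frames up to this size may be copied into a freshly allocated skb
 * instead of handing over the original buffer.
 */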
4912module_param(rxq_number, int, 0444);
4913module_param(txq_number, int, 0444);
4914
4915module_param(rxq_def, int, 0444);
4916module_param(rx_copybreak, int, 0644);