   1/*
   2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
   3 *
   4 * Copyright (C) 2014 Marvell
   5 *
   6 * Marcin Wojtas <mw@semihalf.com>
   7 *
   8 * This file is licensed under the terms of the GNU General Public
   9 * License version 2. This program is licensed "as is" without any
  10 * warranty of any kind, whether express or implied.
  11 */
  12
  13#include <linux/acpi.h>
  14#include <linux/kernel.h>
  15#include <linux/netdevice.h>
  16#include <linux/etherdevice.h>
  17#include <linux/platform_device.h>
  18#include <linux/skbuff.h>
  19#include <linux/inetdevice.h>
  20#include <linux/mbus.h>
  21#include <linux/module.h>
  22#include <linux/mfd/syscon.h>
  23#include <linux/interrupt.h>
  24#include <linux/cpumask.h>
  25#include <linux/of.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_mdio.h>
  28#include <linux/of_net.h>
  29#include <linux/of_address.h>
  30#include <linux/of_device.h>
  31#include <linux/phy.h>
  32#include <linux/phy/phy.h>
  33#include <linux/clk.h>
  34#include <linux/hrtimer.h>
  35#include <linux/ktime.h>
  36#include <linux/regmap.h>
  37#include <uapi/linux/ppp_defs.h>
  38#include <net/ip.h>
  39#include <net/ipv6.h>
  40#include <net/tso.h>
  41
  42/* Fifo Registers */
  43#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
  44#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
  45#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
  46#define MVPP2_RX_FIFO_INIT_REG			0x64
  47#define MVPP22_TX_FIFO_THRESH_REG(port)		(0x8840 + 4 * (port))
  48#define MVPP22_TX_FIFO_SIZE_REG(port)		(0x8860 + 4 * (port))
  49
  50/* RX DMA Top Registers */
  51#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
  52#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
  53#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
  54#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
  55#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
  56#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
  57#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
  58#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
  59#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
  60#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
  61#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
  62#define     MVPP2_RXQ_POOL_LONG_OFFS		24
  63#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
  64#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
  65#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
  66#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
  67#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)
  68
  69/* Top Registers */
  70#define MVPP2_MH_REG(port)			(0x5040 + 4 * (port))
  71#define MVPP2_DSA_EXTENDED			BIT(5)
  72
  73/* Parser Registers */
  74#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
  75#define     MVPP2_PRS_PORT_LU_MAX		0xf
  76#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
  77#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
  78#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
  79#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
  80#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
  81#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
  82#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
  83#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
  84#define MVPP2_PRS_TCAM_IDX_REG			0x1100
  85#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
  86#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
  87#define MVPP2_PRS_SRAM_IDX_REG			0x1200
  88#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
  89#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
  90#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)
  91
  92/* RSS Registers */
  93#define MVPP22_RSS_INDEX			0x1500
  94#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	(idx)
  95#define     MVPP22_RSS_INDEX_TABLE(idx)		((idx) << 8)
  96#define     MVPP22_RSS_INDEX_QUEUE(idx)		((idx) << 16)
  97#define MVPP22_RSS_TABLE_ENTRY			0x1508
  98#define MVPP22_RSS_TABLE			0x1510
  99#define     MVPP22_RSS_TABLE_POINTER(p)		(p)
 100#define MVPP22_RSS_WIDTH			0x150c
 101
 102/* Classifier Registers */
 103#define MVPP2_CLS_MODE_REG			0x1800
 104#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
 105#define MVPP2_CLS_PORT_WAY_REG			0x1810
 106#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
 107#define MVPP2_CLS_LKP_INDEX_REG			0x1814
 108#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
 109#define MVPP2_CLS_LKP_TBL_REG			0x1818
 110#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
 111#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
 112#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
 113#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
 114#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
 115#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
 116#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
 117#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
 118#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
 119#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
 120#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
 121#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
 122
 123/* Descriptor Manager Top Registers */
 124#define MVPP2_RXQ_NUM_REG			0x2040
 125#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
 126#define     MVPP22_DESC_ADDR_OFFS		8
 127#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
 128#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
 129#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
 130#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
 131#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
 132#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
 133#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
 134#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
 135#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
 136#define MVPP2_RXQ_THRESH_REG			0x204c
 137#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
 138#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
 139#define MVPP2_RXQ_INDEX_REG			0x2050
 140#define MVPP2_TXQ_NUM_REG			0x2080
 141#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
 142#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
 143#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
 144#define MVPP2_TXQ_THRESH_REG			0x2094
 145#define	    MVPP2_TXQ_THRESH_OFFSET		16
 146#define	    MVPP2_TXQ_THRESH_MASK		0x3fff
 147#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
 148#define MVPP2_TXQ_INDEX_REG			0x2098
 149#define MVPP2_TXQ_PREF_BUF_REG			0x209c
 150#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
 151#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
 152#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
 153#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
 154#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
 155#define MVPP2_TXQ_PENDING_REG			0x20a0
 156#define     MVPP2_TXQ_PENDING_MASK		0x3fff
 157#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
 158#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
 159#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
 160#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
 161#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
 162#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
 163#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
 164#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
 165#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
 166#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
 167#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
 168#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
 169#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
 170#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
 171#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
 172#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
 173#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
 174
 175/* MBUS bridge registers */
 176#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
 177#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
 178#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
 179#define MVPP2_BASE_ADDR_ENABLE			0x4060
 180
 181/* AXI Bridge Registers */
 182#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
 183#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
 184#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
 185#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
 186#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
 187#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
 188#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
 189#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
 190#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
 191#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
 192#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
 193#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164
 194
 195/* Values for AXI Bridge registers */
 196#define MVPP22_AXI_ATTR_CACHE_OFFS		0
 197#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12
 198
 199#define MVPP22_AXI_CODE_CACHE_OFFS		0
 200#define MVPP22_AXI_CODE_DOMAIN_OFFS		4
 201
 202#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
 203#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
 204#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb
 205
 206#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
 207#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
 208
 209/* Interrupt Cause and Mask registers */
 210#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
 211#define     MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0
 212
 213#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
 214#define     MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
 215#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))
 216
 217#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
 218#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
 219#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
 220#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
 221
 222#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
 223#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
 224
 225#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
 226#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
 227#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
 228#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8
 229
 230#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
 231#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
 232#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
 233#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
 234#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
 235#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
 236#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
 237#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
 238#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
 239#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
 240#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
 241#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
 242#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
 243#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
 244#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
 245#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
 246#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
 247#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
 248#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
 249
 250/* Buffer Manager registers */
 251#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
 252#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
 253#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
 254#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
 255#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
 256#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
 257#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
 258#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
 259#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
 260#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
 261#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
 262#define MVPP22_BM_POOL_PTRS_NUM_MASK		0xfff8
 263#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
 264#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
 265#define     MVPP2_BM_START_MASK			BIT(0)
 266#define     MVPP2_BM_STOP_MASK			BIT(1)
 267#define     MVPP2_BM_STATE_MASK			BIT(4)
 268#define     MVPP2_BM_LOW_THRESH_OFFS		8
 269#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
 270#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
 271						MVPP2_BM_LOW_THRESH_OFFS)
 272#define     MVPP2_BM_HIGH_THRESH_OFFS		16
 273#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
 274#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
 275						MVPP2_BM_HIGH_THRESH_OFFS)
 276#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
 277#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
 278#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
 279#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
 280#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
 281#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
 282#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
 283#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
 284#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
 285#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
 286#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
 287#define     MVPP22_BM_ADDR_HIGH_PHYS_MASK	0xff
 288#define     MVPP22_BM_ADDR_HIGH_VIRT_MASK	0xff00
 289#define     MVPP22_BM_ADDR_HIGH_VIRT_SHIFT	8
 290#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
 291#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
 292#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
 293#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
 294#define MVPP2_BM_VIRT_RLS_REG			0x64c0
 295#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
 296#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
 297#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
 298#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
 299
 300/* TX Scheduler registers */
 301#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
 302#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
 303#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
 304#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
 305#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
 306#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
 307#define MVPP2_TXP_SCHED_MTU_REG			0x801c
 308#define     MVPP2_TXP_MTU_MAX			0x7FFFF
 309#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
 310#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
 311#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
 312#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
 313#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
 314#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
 315#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
 316#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
 317#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
 318#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
 319#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
 320#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
 321#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
 322#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
 323
 324/* TX general registers */
 325#define MVPP2_TX_SNOOP_REG			0x8800
 326#define MVPP2_TX_PORT_FLUSH_REG			0x8810
 327#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))
 328
 329/* LMS registers */
 330#define MVPP2_SRC_ADDR_MIDDLE			0x24
 331#define MVPP2_SRC_ADDR_HIGH			0x28
 332#define MVPP2_PHY_AN_CFG0_REG			0x34
 333#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
 334#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
 335#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27
 336
 337/* Per-port registers */
 338#define MVPP2_GMAC_CTRL_0_REG			0x0
 339#define     MVPP2_GMAC_PORT_EN_MASK		BIT(0)
 340#define     MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
 341#define     MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
 342#define     MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
 343#define     MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
 344#define MVPP2_GMAC_CTRL_1_REG			0x4
 345#define     MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
 346#define     MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
 347#define     MVPP2_GMAC_PCS_LB_EN_BIT		6
 348#define     MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
 349#define     MVPP2_GMAC_SA_LOW_OFFS		7
 350#define MVPP2_GMAC_CTRL_2_REG			0x8
 351#define     MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
 352#define     MVPP2_GMAC_FLOW_CTRL_MASK		GENMASK(2, 1)
 353#define     MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
 354#define     MVPP2_GMAC_INTERNAL_CLK_MASK	BIT(4)
 355#define     MVPP2_GMAC_DISABLE_PADDING		BIT(5)
 356#define     MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
 357#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
 358#define     MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
 359#define     MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
 360#define     MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
 361#define     MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
 362#define     MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
 363#define     MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
 364#define     MVPP2_GMAC_AN_SPEED_EN		BIT(7)
 365#define     MVPP2_GMAC_FC_ADV_EN		BIT(9)
 366#define     MVPP2_GMAC_FLOW_CTRL_AUTONEG	BIT(11)
 367#define     MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
 368#define     MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
 369#define MVPP2_GMAC_STATUS0			0x10
 370#define     MVPP2_GMAC_STATUS0_LINK_UP		BIT(0)
 371#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
 372#define     MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
 373#define     MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
 374#define     MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
 375					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
 376#define MVPP22_GMAC_INT_STAT			0x20
 377#define     MVPP22_GMAC_INT_STAT_LINK		BIT(1)
 378#define MVPP22_GMAC_INT_MASK			0x24
 379#define     MVPP22_GMAC_INT_MASK_LINK_STAT	BIT(1)
 380#define MVPP22_GMAC_CTRL_4_REG			0x90
 381#define     MVPP22_CTRL4_EXT_PIN_GMII_SEL	BIT(0)
 382#define     MVPP22_CTRL4_DP_CLK_SEL		BIT(5)
 383#define     MVPP22_CTRL4_SYNC_BYPASS_DIS	BIT(6)
 384#define     MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
 385#define MVPP22_GMAC_INT_SUM_MASK		0xa4
 386#define     MVPP22_GMAC_INT_SUM_MASK_LINK_STAT	BIT(1)
 387
 388/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 389 * relative to port->base.
 390 */
 391#define MVPP22_XLG_CTRL0_REG			0x100
 392#define     MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
 393#define     MVPP22_XLG_CTRL0_MAC_RESET_DIS	BIT(1)
 394#define     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
 395#define     MVPP22_XLG_CTRL0_MIB_CNT_DIS	BIT(14)
 396#define MVPP22_XLG_CTRL1_REG			0x104
 397#define     MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
 398#define     MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
 399#define MVPP22_XLG_STATUS			0x10c
 400#define     MVPP22_XLG_STATUS_LINK_UP		BIT(0)
 401#define MVPP22_XLG_INT_STAT			0x114
 402#define     MVPP22_XLG_INT_STAT_LINK		BIT(1)
 403#define MVPP22_XLG_INT_MASK			0x118
 404#define     MVPP22_XLG_INT_MASK_LINK		BIT(1)
 405#define MVPP22_XLG_CTRL3_REG			0x11c
 406#define     MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
 407#define     MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
 408#define     MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)
 409#define MVPP22_XLG_EXT_INT_MASK			0x15c
 410#define     MVPP22_XLG_EXT_INT_MASK_XLG		BIT(1)
 411#define     MVPP22_XLG_EXT_INT_MASK_GIG		BIT(2)
 412#define MVPP22_XLG_CTRL4_REG			0x184
 413#define     MVPP22_XLG_CTRL4_FWD_FC		BIT(5)
 414#define     MVPP22_XLG_CTRL4_FWD_PFC		BIT(6)
 415#define     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)
 416
 417/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
 418#define MVPP22_SMI_MISC_CFG_REG			0x1204
 419#define     MVPP22_SMI_POLLING_EN		BIT(10)
 420
 421#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)
 422
 423#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
 424
 425/* Descriptor ring Macros */
 426#define MVPP2_QUEUE_NEXT_DESC(q, index) \
 427	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
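/* For illustration (assuming last_desc is size - 1, e.g. 255 for a
 * 256-descriptor ring): MVPP2_QUEUE_NEXT_DESC(q, 254) yields 255,
 * while MVPP2_QUEUE_NEXT_DESC(q, 255) wraps back to 0.
 */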
 428
  429/* MPCS registers. PPv2.2 only */
 430#define MVPP22_MPCS_BASE(port)			(0x7000 + (port) * 0x1000)
 431#define MVPP22_MPCS_CTRL			0x14
 432#define     MVPP22_MPCS_CTRL_FWD_ERR_CONN	BIT(10)
 433#define MVPP22_MPCS_CLK_RESET			0x14c
 434#define     MAC_CLK_RESET_SD_TX			BIT(0)
 435#define     MAC_CLK_RESET_SD_RX			BIT(1)
 436#define     MAC_CLK_RESET_MAC			BIT(2)
 437#define     MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
 438#define     MVPP22_MPCS_CLK_RESET_DIV_SET	BIT(11)
 439
 440/* XPCS registers. PPv2.2 only */
 441#define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
 442#define MVPP22_XPCS_CFG0			0x0
 443#define     MVPP22_XPCS_CFG0_PCS_MODE(n)	((n) << 3)
 444#define     MVPP22_XPCS_CFG0_ACTIVE_LANE(n)	((n) << 5)
 445
 446/* System controller registers. Accessed through a regmap. */
 447#define GENCONF_SOFT_RESET1				0x1108
 448#define     GENCONF_SOFT_RESET1_GOP			BIT(6)
 449#define GENCONF_PORT_CTRL0				0x1110
 450#define     GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT		BIT(1)
 451#define     GENCONF_PORT_CTRL0_RX_DATA_SAMPLE		BIT(29)
 452#define     GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
 453#define GENCONF_PORT_CTRL1				0x1114
 454#define     GENCONF_PORT_CTRL1_EN(p)			BIT(p)
 455#define     GENCONF_PORT_CTRL1_RESET(p)			(BIT(p) << 28)
 456#define GENCONF_CTRL0					0x1120
 457#define     GENCONF_CTRL0_PORT0_RGMII			BIT(0)
 458#define     GENCONF_CTRL0_PORT1_RGMII_MII		BIT(1)
 459#define     GENCONF_CTRL0_PORT1_RGMII			BIT(2)
 460
 461/* Various constants */
 462
 463/* Coalescing */
 464#define MVPP2_TXDONE_COAL_PKTS_THRESH	64
 465#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
 466#define MVPP2_TXDONE_COAL_USEC		1000
 467#define MVPP2_RX_COAL_PKTS		32
 468#define MVPP2_RX_COAL_USEC		64
 469
  470/* The two-byte Marvell header. It either contains a special value used
  471 * by Marvell switches when a specific hardware mode is enabled (not
  472 * supported by this driver) or is automatically filled with zeroes on
  473 * the RX side. Since those two bytes sit at the front of the Ethernet
  474 * header, they allow the IP header to be aligned on a 4-byte
  475 * boundary automatically: the hardware skips those two bytes on its
  476 * own.
  477 */
 478#define MVPP2_MH_SIZE			2
 479#define MVPP2_ETH_TYPE_LEN		2
 480#define MVPP2_PPPOE_HDR_SIZE		8
 481#define MVPP2_VLAN_TAG_LEN		4
 482#define MVPP2_VLAN_TAG_EDSA_LEN		8
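/* Illustration of the alignment described above: with the 2-byte MH in
 * front, a plain Ethernet header ends at offset MVPP2_MH_SIZE + ETH_HLEN
 * = 2 + 14 = 16 bytes, so the IP header that follows starts on a 4-byte
 * boundary without any extra padding.
 */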
 483
 484/* Lbtd 802.3 type */
 485#define MVPP2_IP_LBDT_TYPE		0xfffa
 486
 487#define MVPP2_TX_CSUM_MAX_SIZE		9800
 488
 489/* Timeout constants */
 490#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
 491#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000
 492
 493#define MVPP2_TX_MTU_MAX		0x7ffff
 494
 495/* Maximum number of T-CONTs of PON port */
 496#define MVPP2_MAX_TCONT			16
 497
 498/* Maximum number of supported ports */
 499#define MVPP2_MAX_PORTS			4
 500
 501/* Maximum number of TXQs used by single port */
 502#define MVPP2_MAX_TXQ			8
 503
 504/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
  505 * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
 506 * multiply this value by two to count the maximum number of skb descs needed.
 507 */
 508#define MVPP2_MAX_TSO_SEGS		300
 509#define MVPP2_MAX_SKB_DESCS		(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
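/* For example, with MVPP2_MAX_TSO_SEGS = 300 and the common
 * MAX_SKB_FRAGS value of 17 (4 KB pages), MVPP2_MAX_SKB_DESCS
 * evaluates to 300 * 2 + 17 = 617 descriptors.
 */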
 510
  511/* Default number of RXQs in use */
 512#define MVPP2_DEFAULT_RXQ		4
 513
 514/* Max number of Rx descriptors */
 515#define MVPP2_MAX_RXD_MAX		1024
 516#define MVPP2_MAX_RXD_DFLT		128
 517
 518/* Max number of Tx descriptors */
 519#define MVPP2_MAX_TXD_MAX		2048
 520#define MVPP2_MAX_TXD_DFLT		1024
 521
 522/* Amount of Tx descriptors that can be reserved at once by CPU */
 523#define MVPP2_CPU_DESC_CHUNK		64
 524
 525/* Max number of Tx descriptors in each aggregated queue */
 526#define MVPP2_AGGR_TXQ_SIZE		256
 527
 528/* Descriptor aligned size */
 529#define MVPP2_DESC_ALIGNED_SIZE		32
 530
 531/* Descriptor alignment mask */
 532#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
 533
 534/* RX FIFO constants */
 535#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB	0x8000
 536#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB	0x2000
 537#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB	0x1000
 538#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB	0x200
 539#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB	0x80
 540#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB	0x40
 541#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80
 542
 543/* TX FIFO constants */
 544#define MVPP22_TX_FIFO_DATA_SIZE_10KB		0xa
 545#define MVPP22_TX_FIFO_DATA_SIZE_3KB		0x3
 546#define MVPP2_TX_FIFO_THRESHOLD_MIN		256
 547#define MVPP2_TX_FIFO_THRESHOLD_10KB	\
 548	(MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
 549#define MVPP2_TX_FIFO_THRESHOLD_3KB	\
 550	(MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
 551
 552/* RX buffer constants */
 553#define MVPP2_SKB_SHINFO_SIZE \
 554	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 555
 556#define MVPP2_RX_PKT_SIZE(mtu) \
 557	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
 558	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())
 559
 560#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
 561#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
 562#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
 563	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
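/* Note that MVPP2_RX_MAX_PKT_SIZE() is the inverse of the two macros
 * above: MVPP2_RX_MAX_PKT_SIZE(MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(x)))
 * expands to x + NET_SKB_PAD + MVPP2_SKB_SHINFO_SIZE - NET_SKB_PAD -
 * MVPP2_SKB_SHINFO_SIZE, i.e. back to x.
 */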
 564
 565#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)
 566
 567/* IPv6 max L3 address size */
 568#define MVPP2_MAX_L3_ADDR_SIZE		16
 569
 570/* Port flags */
 571#define MVPP2_F_LOOPBACK		BIT(0)
 572
 573/* Marvell tag types */
 574enum mvpp2_tag_type {
 575	MVPP2_TAG_TYPE_NONE = 0,
 576	MVPP2_TAG_TYPE_MH   = 1,
 577	MVPP2_TAG_TYPE_DSA  = 2,
 578	MVPP2_TAG_TYPE_EDSA = 3,
 579	MVPP2_TAG_TYPE_VLAN = 4,
 580	MVPP2_TAG_TYPE_LAST = 5
 581};
 582
 583/* Parser constants */
 584#define MVPP2_PRS_TCAM_SRAM_SIZE	256
 585#define MVPP2_PRS_TCAM_WORDS		6
 586#define MVPP2_PRS_SRAM_WORDS		4
 587#define MVPP2_PRS_FLOW_ID_SIZE		64
 588#define MVPP2_PRS_FLOW_ID_MASK		0x3f
 589#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
 590#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
 591#define MVPP2_PRS_IPV4_HEAD		0x40
 592#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
 593#define MVPP2_PRS_IPV4_MC		0xe0
 594#define MVPP2_PRS_IPV4_MC_MASK		0xf0
 595#define MVPP2_PRS_IPV4_BC_MASK		0xff
 596#define MVPP2_PRS_IPV4_IHL		0x5
 597#define MVPP2_PRS_IPV4_IHL_MASK		0xf
 598#define MVPP2_PRS_IPV6_MC		0xff
 599#define MVPP2_PRS_IPV6_MC_MASK		0xff
 600#define MVPP2_PRS_IPV6_HOP_MASK		0xff
 601#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
 602#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
 603#define MVPP2_PRS_DBL_VLANS_MAX		100
 604#define MVPP2_PRS_CAST_MASK		BIT(0)
 605#define MVPP2_PRS_MCAST_VAL		BIT(0)
 606#define MVPP2_PRS_UCAST_VAL		0x0
 607
 608/* Tcam structure:
 609 * - lookup ID - 4 bits
 610 * - port ID - 1 byte
 611 * - additional information - 1 byte
 612 * - header data - 8 bytes
 613 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 614 */
 615#define MVPP2_PRS_AI_BITS			8
 616#define MVPP2_PRS_PORT_MASK			0xff
 617#define MVPP2_PRS_LU_MASK			0xf
 618#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
 619				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
 620#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
 621					      (((offs) * 2) - ((offs) % 2)  + 2)
 622#define MVPP2_PRS_TCAM_AI_BYTE			16
 623#define MVPP2_PRS_TCAM_PORT_BYTE		17
 624#define MVPP2_PRS_TCAM_LU_BYTE			20
 625#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
 626#define MVPP2_PRS_TCAM_INV_WORD			5
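/* For illustration, the header-data byte mapping above interleaves each
 * 16-bit chunk of match data with its 16-bit enable mask:
 * MVPP2_PRS_TCAM_DATA_BYTE(0..3) gives TCAM bytes 0, 1, 4, 5 and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) gives the enable bytes 2, 3, 6, 7.
 */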
 627
 628#define MVPP2_PRS_VID_TCAM_BYTE         2
 629
 630/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
 631 * with 4 dedicated to UC filtering and the rest to multicast filtering.
  632 * Additionally we reserve one entry for the broadcast address, and one for
 633 * each port's own address.
 634 */
 635#define MVPP2_PRS_MAC_UC_MC_FILT_MAX	25
 636#define MVPP2_PRS_MAC_RANGE_SIZE	80
 637
 638/* Number of entries per port dedicated to UC and MC filtering */
 639#define MVPP2_PRS_MAC_UC_FILT_MAX	4
 640#define MVPP2_PRS_MAC_MC_FILT_MAX	(MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
 641					 MVPP2_PRS_MAC_UC_FILT_MAX)
 642
  643/* A TCAM range is reserved for VLAN filtering entries; the range size is 33:
  644 * 10 VLAN ID filter entries per port
  645 * 1 default VLAN filter entry per port
  646 * It is assumed that there are 3 filtering ports, not including the loopback port
  647 */
 648#define MVPP2_PRS_VLAN_FILT_MAX		11
 649#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE	33
 650
 651#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY   (MVPP2_PRS_VLAN_FILT_MAX - 2)
 652#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY  (MVPP2_PRS_VLAN_FILT_MAX - 1)
 653
 654/* Tcam entries ID */
 655#define MVPP2_PE_DROP_ALL		0
 656#define MVPP2_PE_FIRST_FREE_TID		1
 657
 658/* MAC filtering range */
 659#define MVPP2_PE_MAC_RANGE_END		(MVPP2_PE_VID_FILT_RANGE_START - 1)
 660#define MVPP2_PE_MAC_RANGE_START	(MVPP2_PE_MAC_RANGE_END - \
 661						MVPP2_PRS_MAC_RANGE_SIZE + 1)
 662/* VLAN filtering range */
 663#define MVPP2_PE_VID_FILT_RANGE_END     (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
 664#define MVPP2_PE_VID_FILT_RANGE_START   (MVPP2_PE_VID_FILT_RANGE_END - \
 665					 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
 666#define MVPP2_PE_LAST_FREE_TID          (MVPP2_PE_MAC_RANGE_START - 1)
 667#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
 668#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
 669#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
 670#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
 671#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 22)
 672#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 21)
 673#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 20)
 674#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
 675#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
 676#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
 677#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
 678#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
 679#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
 680#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
 681#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
 682#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
 683#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
 684#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
 685#define MVPP2_PE_VID_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
 686#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 687#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 688#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
 689/* reserved */
 690#define MVPP2_PE_MAC_MC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 691#define MVPP2_PE_MAC_UC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 692#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
 693
 694#define MVPP2_PRS_VID_PORT_FIRST(port)	(MVPP2_PE_VID_FILT_RANGE_START + \
 695					 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
 696#define MVPP2_PRS_VID_PORT_LAST(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
 697					 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
 698/* Index of default vid filter for given port */
 699#define MVPP2_PRS_VID_PORT_DFLT(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
 700					 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
 701
 702/* Sram structure
 703 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 704 */
 705#define MVPP2_PRS_SRAM_RI_OFFS			0
 706#define MVPP2_PRS_SRAM_RI_WORD			0
 707#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
 708#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
 709#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
 710#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
 711#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
 712#define MVPP2_PRS_SRAM_UDF_OFFS			73
 713#define MVPP2_PRS_SRAM_UDF_BITS			8
 714#define MVPP2_PRS_SRAM_UDF_MASK			0xff
 715#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
 716#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
 717#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
 718#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
 719#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
 720#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
 721#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
 722#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
 723#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
 724#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
 725#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
 726#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
 727#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
 728#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
 729#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
 730#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
 731#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
 732#define MVPP2_PRS_SRAM_AI_OFFS			90
 733#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
 734#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
 735#define MVPP2_PRS_SRAM_AI_MASK			0xff
 736#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
 737#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
 738#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
 739#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
 740
 741/* Sram result info bits assignment */
 742#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
 743#define MVPP2_PRS_RI_DSA_MASK			0x2
 744#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
 745#define MVPP2_PRS_RI_VLAN_NONE			0x0
 746#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
 747#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
 748#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
 749#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
 750#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
 751#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
 752#define MVPP2_PRS_RI_L2_UCAST			0x0
 753#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
 754#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
 755#define MVPP2_PRS_RI_PPPOE_MASK			0x800
 756#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
 757#define MVPP2_PRS_RI_L3_UN			0x0
 758#define MVPP2_PRS_RI_L3_IP4			BIT(12)
 759#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
 760#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
 761#define MVPP2_PRS_RI_L3_IP6			BIT(14)
 762#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
 763#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
 764#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
 765#define MVPP2_PRS_RI_L3_UCAST			0x0
 766#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
 767#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
 768#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
 769#define MVPP2_PRS_RI_IP_FRAG_TRUE		BIT(17)
 770#define MVPP2_PRS_RI_UDF3_MASK			0x300000
 771#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
 772#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
 773#define MVPP2_PRS_RI_L4_TCP			BIT(22)
 774#define MVPP2_PRS_RI_L4_UDP			BIT(23)
 775#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
 776#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
 777#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
 778#define MVPP2_PRS_RI_DROP_MASK			0x80000000
 779
 780/* Sram additional info bits assignment */
 781#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
 782#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
 783#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
 784#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
 785#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
 786#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
 787#define MVPP2_PRS_SINGLE_VLAN_AI		0
 788#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
 789#define MVPP2_PRS_EDSA_VID_AI_BIT		BIT(0)
 790
 791/* DSA/EDSA type */
 792#define MVPP2_PRS_TAGGED		true
 793#define MVPP2_PRS_UNTAGGED		false
 794#define MVPP2_PRS_EDSA			true
 795#define MVPP2_PRS_DSA			false
 796
 797/* MAC entries, shadow udf */
 798enum mvpp2_prs_udf {
 799	MVPP2_PRS_UDF_MAC_DEF,
 800	MVPP2_PRS_UDF_MAC_RANGE,
 801	MVPP2_PRS_UDF_L2_DEF,
 802	MVPP2_PRS_UDF_L2_DEF_COPY,
 803	MVPP2_PRS_UDF_L2_USER,
 804};
 805
 806/* Lookup ID */
 807enum mvpp2_prs_lookup {
 808	MVPP2_PRS_LU_MH,
 809	MVPP2_PRS_LU_MAC,
 810	MVPP2_PRS_LU_DSA,
 811	MVPP2_PRS_LU_VLAN,
 812	MVPP2_PRS_LU_VID,
 813	MVPP2_PRS_LU_L2,
 814	MVPP2_PRS_LU_PPPOE,
 815	MVPP2_PRS_LU_IP4,
 816	MVPP2_PRS_LU_IP6,
 817	MVPP2_PRS_LU_FLOWS,
 818	MVPP2_PRS_LU_LAST,
 819};
 820
 821/* L2 cast enum */
 822enum mvpp2_prs_l2_cast {
 823	MVPP2_PRS_L2_UNI_CAST,
 824	MVPP2_PRS_L2_MULTI_CAST,
 825};
 826
 827/* L3 cast enum */
 828enum mvpp2_prs_l3_cast {
 829	MVPP2_PRS_L3_UNI_CAST,
 830	MVPP2_PRS_L3_MULTI_CAST,
 831	MVPP2_PRS_L3_BROAD_CAST
 832};
 833
 834/* Classifier constants */
 835#define MVPP2_CLS_FLOWS_TBL_SIZE	512
 836#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
 837#define MVPP2_CLS_LKP_TBL_SIZE		64
 838#define MVPP2_CLS_RX_QUEUES		256
 839
 840/* RSS constants */
 841#define MVPP22_RSS_TABLE_ENTRIES	32
 842
 843/* BM constants */
 844#define MVPP2_BM_JUMBO_BUF_NUM		512
 845#define MVPP2_BM_LONG_BUF_NUM		1024
 846#define MVPP2_BM_SHORT_BUF_NUM		2048
 847#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
 848#define MVPP2_BM_POOL_PTR_ALIGN		128
 849
 850/* BM cookie (32 bits) definition */
 851#define MVPP2_BM_COOKIE_POOL_OFFS	8
 852#define MVPP2_BM_COOKIE_CPU_OFFS	24
 853
 854#define MVPP2_BM_SHORT_FRAME_SIZE		512
 855#define MVPP2_BM_LONG_FRAME_SIZE		2048
 856#define MVPP2_BM_JUMBO_FRAME_SIZE		10240
 857/* BM short pool packet size
  858 * These values ensure that for SWF the total number
 859 * of bytes allocated for each buffer will be 512
 860 */
 861#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
 862#define MVPP2_BM_LONG_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
 863#define MVPP2_BM_JUMBO_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
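/* For example, MVPP2_BM_SHORT_PKT_SIZE expands to
 * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, so the usable packet area
 * plus the skb padding and shared-info area add up to exactly the
 * 512-byte short frame size; the numeric result depends on the kernel
 * configuration (cache line size, struct skb_shared_info layout).
 */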
 864
 865#define MVPP21_ADDR_SPACE_SZ		0
 866#define MVPP22_ADDR_SPACE_SZ		SZ_64K
 867
 868#define MVPP2_MAX_THREADS		8
 869#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS
 870
 871enum mvpp2_bm_pool_log_num {
 872	MVPP2_BM_SHORT,
 873	MVPP2_BM_LONG,
 874	MVPP2_BM_JUMBO,
 875	MVPP2_BM_POOLS_NUM
 876};
 877
 878static struct {
 879	int pkt_size;
 880	int buf_num;
 881} mvpp2_pools[MVPP2_BM_POOLS_NUM];
 882
 883/* GMAC MIB Counters register definitions */
 884#define MVPP21_MIB_COUNTERS_OFFSET		0x1000
 885#define MVPP21_MIB_COUNTERS_PORT_SZ		0x400
 886#define MVPP22_MIB_COUNTERS_OFFSET		0x0
 887#define MVPP22_MIB_COUNTERS_PORT_SZ		0x100
 888
 889#define MVPP2_MIB_GOOD_OCTETS_RCVD		0x0
 890#define MVPP2_MIB_BAD_OCTETS_RCVD		0x8
 891#define MVPP2_MIB_CRC_ERRORS_SENT		0xc
 892#define MVPP2_MIB_UNICAST_FRAMES_RCVD		0x10
 893#define MVPP2_MIB_BROADCAST_FRAMES_RCVD		0x18
 894#define MVPP2_MIB_MULTICAST_FRAMES_RCVD		0x1c
 895#define MVPP2_MIB_FRAMES_64_OCTETS		0x20
 896#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS	0x24
 897#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS	0x28
 898#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS	0x2c
 899#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS	0x30
 900#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
 901#define MVPP2_MIB_GOOD_OCTETS_SENT		0x38
 902#define MVPP2_MIB_UNICAST_FRAMES_SENT		0x40
 903#define MVPP2_MIB_MULTICAST_FRAMES_SENT		0x48
 904#define MVPP2_MIB_BROADCAST_FRAMES_SENT		0x4c
 905#define MVPP2_MIB_FC_SENT			0x54
 906#define MVPP2_MIB_FC_RCVD			0x58
 907#define MVPP2_MIB_RX_FIFO_OVERRUN		0x5c
 908#define MVPP2_MIB_UNDERSIZE_RCVD		0x60
 909#define MVPP2_MIB_FRAGMENTS_RCVD		0x64
 910#define MVPP2_MIB_OVERSIZE_RCVD			0x68
 911#define MVPP2_MIB_JABBER_RCVD			0x6c
 912#define MVPP2_MIB_MAC_RCV_ERROR			0x70
 913#define MVPP2_MIB_BAD_CRC_EVENT			0x74
 914#define MVPP2_MIB_COLLISION			0x78
 915#define MVPP2_MIB_LATE_COLLISION		0x7c
 916
 917#define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)
 918
 919#define MVPP2_DESC_DMA_MASK	DMA_BIT_MASK(40)
 920
 921/* Definitions */
 922
 923/* Shared Packet Processor resources */
 924struct mvpp2 {
 925	/* Shared registers' base addresses */
 926	void __iomem *lms_base;
 927	void __iomem *iface_base;
 928
  929	/* On PPv2.2, each "software thread" can access the register
  930	 * base through a separate address space, each 64 KB apart
  931	 * from the others. Typically, such address spaces will be
  932	 * used per CPU.
 933	 */
 934	void __iomem *swth_base[MVPP2_MAX_THREADS];
 935
  936	/* On PPv2.2, some port control registers are located in the system
 937	 * controller space. These registers are accessible through a regmap.
 938	 */
 939	struct regmap *sysctrl_base;
 940
 941	/* Common clocks */
 942	struct clk *pp_clk;
 943	struct clk *gop_clk;
 944	struct clk *mg_clk;
 945	struct clk *mg_core_clk;
 946	struct clk *axi_clk;
 947
 948	/* List of pointers to port structures */
 949	int port_count;
 950	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
 951
 952	/* Aggregated TXQs */
 953	struct mvpp2_tx_queue *aggr_txqs;
 954
 955	/* BM pools */
 956	struct mvpp2_bm_pool *bm_pools;
 957
 958	/* PRS shadow table */
 959	struct mvpp2_prs_shadow *prs_shadow;
 960	/* PRS auxiliary table for double vlan entries control */
 961	bool *prs_double_vlans;
 962
 963	/* Tclk value */
 964	u32 tclk;
 965
 966	/* HW version */
 967	enum { MVPP21, MVPP22 } hw_version;
 968
 969	/* Maximum number of RXQs per port */
 970	unsigned int max_port_rxqs;
 971
 972	/* Workqueue to gather hardware statistics */
 973	char queue_name[30];
 974	struct workqueue_struct *stats_queue;
 975};
 976
 977struct mvpp2_pcpu_stats {
 978	struct	u64_stats_sync syncp;
 979	u64	rx_packets;
 980	u64	rx_bytes;
 981	u64	tx_packets;
 982	u64	tx_bytes;
 983};
 984
 985/* Per-CPU port control */
 986struct mvpp2_port_pcpu {
 987	struct hrtimer tx_done_timer;
 988	bool timer_scheduled;
 989	/* Tasklet for egress finalization */
 990	struct tasklet_struct tx_done_tasklet;
 991};
 992
 993struct mvpp2_queue_vector {
 994	int irq;
 995	struct napi_struct napi;
 996	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
 997	int sw_thread_id;
 998	u16 sw_thread_mask;
 999	int first_rxq;
1000	int nrxqs;
1001	u32 pending_cause_rx;
1002	struct mvpp2_port *port;
1003};
1004
1005struct mvpp2_port {
1006	u8 id;
1007
1008	/* Index of the port from the "group of ports" complex point
1009	 * of view
1010	 */
1011	int gop_id;
1012
1013	int link_irq;
1014
1015	struct mvpp2 *priv;
1016
1017	/* Firmware node associated to the port */
1018	struct fwnode_handle *fwnode;
1019
1020	/* Per-port registers' base address */
1021	void __iomem *base;
1022	void __iomem *stats_base;
1023
1024	struct mvpp2_rx_queue **rxqs;
1025	unsigned int nrxqs;
1026	struct mvpp2_tx_queue **txqs;
1027	unsigned int ntxqs;
1028	struct net_device *dev;
1029
1030	int pkt_size;
1031
1032	/* Per-CPU port control */
1033	struct mvpp2_port_pcpu __percpu *pcpu;
1034
1035	/* Flags */
1036	unsigned long flags;
1037
1038	u16 tx_ring_size;
1039	u16 rx_ring_size;
1040	struct mvpp2_pcpu_stats __percpu *stats;
1041	u64 *ethtool_stats;
1042
1043	/* Per-port work and its lock to gather hardware statistics */
1044	struct mutex gather_stats_lock;
1045	struct delayed_work stats_work;
1046
1047	phy_interface_t phy_interface;
1048	struct device_node *phy_node;
1049	struct phy *comphy;
1050	unsigned int link;
1051	unsigned int duplex;
1052	unsigned int speed;
1053
1054	struct mvpp2_bm_pool *pool_long;
1055	struct mvpp2_bm_pool *pool_short;
1056
1057	/* Index of first port's physical RXQ */
1058	u8 first_rxq;
1059
1060	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
1061	unsigned int nqvecs;
1062	bool has_tx_irqs;
1063
1064	u32 tx_time_coal;
1065};
1066
1067/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
1068 * layout of the transmit and reception DMA descriptors, and their
1069 * layout is therefore defined by the hardware design
1070 */
1071
1072#define MVPP2_TXD_L3_OFF_SHIFT		0
1073#define MVPP2_TXD_IP_HLEN_SHIFT		8
1074#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
1075#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
1076#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
1077#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
1078#define MVPP2_TXD_L4_UDP		BIT(24)
1079#define MVPP2_TXD_L3_IP6		BIT(26)
1080#define MVPP2_TXD_L_DESC		BIT(28)
1081#define MVPP2_TXD_F_DESC		BIT(29)
1082
1083#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
1084#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
1085#define MVPP2_RXD_ERR_CRC		0x0
1086#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
1087#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
1088#define MVPP2_RXD_BM_POOL_ID_OFFS	16
1089#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
1090#define MVPP2_RXD_HWF_SYNC		BIT(21)
1091#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
1092#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
1093#define MVPP2_RXD_L4_TCP		BIT(25)
1094#define MVPP2_RXD_L4_UDP		BIT(26)
1095#define MVPP2_RXD_L3_IP4		BIT(28)
1096#define MVPP2_RXD_L3_IP6		BIT(30)
1097#define MVPP2_RXD_BUF_HDR		BIT(31)
1098
1099/* HW TX descriptor for PPv2.1 */
1100struct mvpp21_tx_desc {
1101	u32 command;		/* Options used by HW for packet transmitting.*/
1102	u8  packet_offset;	/* the offset from the buffer beginning	*/
1103	u8  phys_txq;		/* destination queue ID			*/
1104	u16 data_size;		/* data size of transmitted packet in bytes */
1105	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
1106	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
1107	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
1108	u32 reserved2;		/* reserved (for future use)		*/
1109};
1110
1111/* HW RX descriptor for PPv2.1 */
1112struct mvpp21_rx_desc {
1113	u32 status;		/* info about received packet		*/
1114	u16 reserved1;		/* parser_info (for future use, PnC)	*/
1115	u16 data_size;		/* size of received packet in bytes	*/
1116	u32 buf_dma_addr;	/* physical address of the buffer	*/
1117	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
1118	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
1119	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
1120	u8  reserved4;		/* bm_qset (for future use, BM)		*/
1121	u8  reserved5;
1122	u16 reserved6;		/* classify_info (for future use, PnC)	*/
1123	u32 reserved7;		/* flow_id (for future use, PnC) */
1124	u32 reserved8;
1125};
1126
1127/* HW TX descriptor for PPv2.2 */
1128struct mvpp22_tx_desc {
1129	u32 command;
1130	u8  packet_offset;
1131	u8  phys_txq;
1132	u16 data_size;
1133	u64 reserved1;
1134	u64 buf_dma_addr_ptp;
1135	u64 buf_cookie_misc;
1136};
1137
1138/* HW RX descriptor for PPv2.2 */
1139struct mvpp22_rx_desc {
1140	u32 status;
1141	u16 reserved1;
1142	u16 data_size;
1143	u32 reserved2;
1144	u32 reserved3;
1145	u64 buf_dma_addr_key_hash;
1146	u64 buf_cookie_misc;
1147};
1148
1149/* Opaque type used by the driver to manipulate the HW TX and RX
1150 * descriptors
1151 */
1152struct mvpp2_tx_desc {
1153	union {
1154		struct mvpp21_tx_desc pp21;
1155		struct mvpp22_tx_desc pp22;
1156	};
1157};
1158
1159struct mvpp2_rx_desc {
1160	union {
1161		struct mvpp21_rx_desc pp21;
1162		struct mvpp22_rx_desc pp22;
1163	};
1164};
1165
1166struct mvpp2_txq_pcpu_buf {
1167	/* Transmitted SKB */
1168	struct sk_buff *skb;
1169
1170	/* Physical address of transmitted buffer */
1171	dma_addr_t dma;
1172
1173	/* Size transmitted */
1174	size_t size;
1175};
1176
1177/* Per-CPU Tx queue control */
1178struct mvpp2_txq_pcpu {
1179	int cpu;
1180
1181	/* Number of Tx DMA descriptors in the descriptor ring */
1182	int size;
1183
 1184	/* Number of currently used Tx DMA descriptors in the
1185	 * descriptor ring
1186	 */
1187	int count;
1188
1189	int wake_threshold;
1190	int stop_threshold;
1191
1192	/* Number of Tx DMA descriptors reserved for each CPU */
1193	int reserved_num;
1194
 1195	/* Info about transmitted buffers */
1196	struct mvpp2_txq_pcpu_buf *buffs;
1197
1198	/* Index of last TX DMA descriptor that was inserted */
1199	int txq_put_index;
1200
1201	/* Index of the TX DMA descriptor to be cleaned up */
1202	int txq_get_index;
1203
1204	/* DMA buffer for TSO headers */
1205	char *tso_headers;
1206	dma_addr_t tso_headers_dma;
1207};
1208
1209struct mvpp2_tx_queue {
1210	/* Physical number of this Tx queue */
1211	u8 id;
1212
1213	/* Logical number of this Tx queue */
1214	u8 log_id;
1215
1216	/* Number of Tx DMA descriptors in the descriptor ring */
1217	int size;
1218
 1219	/* Number of currently used Tx DMA descriptors in the descriptor ring */
1220	int count;
1221
1222	/* Per-CPU control of physical Tx queues */
1223	struct mvpp2_txq_pcpu __percpu *pcpu;
1224
1225	u32 done_pkts_coal;
1226
 1227	/* Virtual address of the Tx DMA descriptors array */
1228	struct mvpp2_tx_desc *descs;
1229
1230	/* DMA address of the Tx DMA descriptors array */
1231	dma_addr_t descs_dma;
1232
1233	/* Index of the last Tx DMA descriptor */
1234	int last_desc;
1235
1236	/* Index of the next Tx DMA descriptor to process */
1237	int next_desc_to_proc;
1238};
1239
1240struct mvpp2_rx_queue {
1241	/* RX queue number, in the range 0-31 for physical RXQs */
1242	u8 id;
1243
1244	/* Num of rx descriptors in the rx descriptor ring */
1245	int size;
1246
1247	u32 pkts_coal;
1248	u32 time_coal;
1249
1250	/* Virtual address of the RX DMA descriptors array */
1251	struct mvpp2_rx_desc *descs;
1252
1253	/* DMA address of the RX DMA descriptors array */
1254	dma_addr_t descs_dma;
1255
1256	/* Index of the last RX DMA descriptor */
1257	int last_desc;
1258
1259	/* Index of the next RX DMA descriptor to process */
1260	int next_desc_to_proc;
1261
1262	/* ID of port to which physical RXQ is mapped */
1263	int port;
1264
1265	/* Port's logic RXQ number to which physical RXQ is mapped */
1266	int logic_rxq;
1267};
1268
1269union mvpp2_prs_tcam_entry {
1270	u32 word[MVPP2_PRS_TCAM_WORDS];
1271	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
1272};
1273
1274union mvpp2_prs_sram_entry {
1275	u32 word[MVPP2_PRS_SRAM_WORDS];
1276	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
1277};
1278
1279struct mvpp2_prs_entry {
1280	u32 index;
1281	union mvpp2_prs_tcam_entry tcam;
1282	union mvpp2_prs_sram_entry sram;
1283};
1284
1285struct mvpp2_prs_shadow {
1286	bool valid;
1287	bool finish;
1288
1289	/* Lookup ID */
1290	int lu;
1291
1292	/* User defined offset */
1293	int udf;
1294
1295	/* Result info */
1296	u32 ri;
1297	u32 ri_mask;
1298};
1299
1300struct mvpp2_cls_flow_entry {
1301	u32 index;
1302	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1303};
1304
1305struct mvpp2_cls_lookup_entry {
1306	u32 lkpid;
1307	u32 way;
1308	u32 data;
1309};
1310
1311struct mvpp2_bm_pool {
1312	/* Pool number in the range 0-7 */
1313	int id;
1314
1315	/* Buffer Pointers Pool External (BPPE) size */
1316	int size;
1317	/* BPPE size in bytes */
1318	int size_bytes;
1319	/* Number of buffers for this pool */
1320	int buf_num;
1321	/* Pool buffer size */
1322	int buf_size;
1323	/* Packet size */
1324	int pkt_size;
1325	int frag_size;
1326
1327	/* BPPE virtual base address */
1328	u32 *virt_addr;
1329	/* BPPE DMA base address */
1330	dma_addr_t dma_addr;
1331
1332	/* Ports using BM pool */
1333	u32 port_map;
1334};
1335
1336#define IS_TSO_HEADER(txq_pcpu, addr) \
1337	((addr) >= (txq_pcpu)->tso_headers_dma && \
1338	 (addr) < (txq_pcpu)->tso_headers_dma + \
1339	 (txq_pcpu)->size * TSO_HEADER_SIZE)
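/* IS_TSO_HEADER() tells whether a DMA address points into the per-CPU
 * TSO header buffer rather than into a mapped skb fragment; on the
 * tx-completion path it is typically used to skip dma_unmap for such
 * buffers, since the TSO header area is mapped once per queue.
 */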
1340
1341/* Queue modes */
1342#define MVPP2_QDIST_SINGLE_MODE	0
1343#define MVPP2_QDIST_MULTI_MODE	1
1344
1345static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1346
1347module_param(queue_mode, int, 0444);
1348MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
1349
1350#define MVPP2_DRIVER_NAME "mvpp2"
1351#define MVPP2_DRIVER_VERSION "1.0"
1352
1353/* Utility/helper methods */
1354
1355static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1356{
1357	writel(data, priv->swth_base[0] + offset);
1358}
1359
1360static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1361{
1362	return readl(priv->swth_base[0] + offset);
1363}
1364
1365static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
1366{
1367	return readl_relaxed(priv->swth_base[0] + offset);
1368}
1369/* These accessors should be used to access:
1370 *
1371 * - per-CPU registers, where each CPU has its own copy of the
1372 *   register.
1373 *
1374 *   MVPP2_BM_VIRT_ALLOC_REG
1375 *   MVPP2_BM_ADDR_HIGH_ALLOC
1376 *   MVPP22_BM_ADDR_HIGH_RLS_REG
1377 *   MVPP2_BM_VIRT_RLS_REG
1378 *   MVPP2_ISR_RX_TX_CAUSE_REG
1379 *   MVPP2_ISR_RX_TX_MASK_REG
1380 *   MVPP2_TXQ_NUM_REG
1381 *   MVPP2_AGGR_TXQ_UPDATE_REG
1382 *   MVPP2_TXQ_RSVD_REQ_REG
1383 *   MVPP2_TXQ_RSVD_RSLT_REG
1384 *   MVPP2_TXQ_SENT_REG
1385 *   MVPP2_RXQ_NUM_REG
1386 *
1387 * - global registers that must be accessed through a specific CPU
1388 *   window, because they are related to an access to a per-CPU
1389 *   register
1390 *
1391 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
1392 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
1393 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
1394 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
1395 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
1396 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
1397 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
1398 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
1399 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
1400 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 1402 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
1404 */
1405static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1406			       u32 offset, u32 data)
1407{
1408	writel(data, priv->swth_base[cpu] + offset);
1409}
1410
1411static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1412			     u32 offset)
1413{
1414	return readl(priv->swth_base[cpu] + offset);
1415}
1416
1417static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
1418				       u32 offset, u32 data)
1419{
1420	writel_relaxed(data, priv->swth_base[cpu] + offset);
1421}
1422
1423static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
1424				     u32 offset)
1425{
1426	return readl_relaxed(priv->swth_base[cpu] + offset);
1427}
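
/* A minimal usage sketch of the indirect access pattern described above
 * (illustrative only): select the queue through the local CPU window,
 * then read the related register through the same window, e.g.
 *
 *	cpu = get_cpu();
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *	pending = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG) &
 *		  MVPP2_TXQ_PENDING_MASK;
 *	put_cpu();
 */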
1428
1429static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1430					    struct mvpp2_tx_desc *tx_desc)
1431{
1432	if (port->priv->hw_version == MVPP21)
1433		return tx_desc->pp21.buf_dma_addr;
1434	else
1435		return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
1436}
1437
1438static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1439				      struct mvpp2_tx_desc *tx_desc,
1440				      dma_addr_t dma_addr)
1441{
1442	dma_addr_t addr, offset;
1443
1444	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
1445	offset = dma_addr & MVPP2_TX_DESC_ALIGN;
1446
1447	if (port->priv->hw_version == MVPP21) {
1448		tx_desc->pp21.buf_dma_addr = addr;
1449		tx_desc->pp21.packet_offset = offset;
1450	} else {
1451		u64 val = (u64)addr;
1452
1453		tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
1454		tx_desc->pp22.buf_dma_addr_ptp |= val;
1455		tx_desc->pp22.packet_offset = offset;
1456	}
1457}
1458
1459static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1460				    struct mvpp2_tx_desc *tx_desc)
1461{
1462	if (port->priv->hw_version == MVPP21)
1463		return tx_desc->pp21.data_size;
1464	else
1465		return tx_desc->pp22.data_size;
1466}
1467
1468static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1469				  struct mvpp2_tx_desc *tx_desc,
1470				  size_t size)
1471{
1472	if (port->priv->hw_version == MVPP21)
1473		tx_desc->pp21.data_size = size;
1474	else
1475		tx_desc->pp22.data_size = size;
1476}
1477
1478static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1479				 struct mvpp2_tx_desc *tx_desc,
1480				 unsigned int txq)
1481{
1482	if (port->priv->hw_version == MVPP21)
1483		tx_desc->pp21.phys_txq = txq;
1484	else
1485		tx_desc->pp22.phys_txq = txq;
1486}
1487
1488static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1489				 struct mvpp2_tx_desc *tx_desc,
1490				 unsigned int command)
1491{
1492	if (port->priv->hw_version == MVPP21)
1493		tx_desc->pp21.command = command;
1494	else
1495		tx_desc->pp22.command = command;
1496}
1497
1498static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1499					    struct mvpp2_tx_desc *tx_desc)
1500{
1501	if (port->priv->hw_version == MVPP21)
1502		return tx_desc->pp21.packet_offset;
1503	else
1504		return tx_desc->pp22.packet_offset;
1505}
1506
1507static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1508					    struct mvpp2_rx_desc *rx_desc)
1509{
1510	if (port->priv->hw_version == MVPP21)
1511		return rx_desc->pp21.buf_dma_addr;
1512	else
1513		return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
1514}
1515
1516static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1517					     struct mvpp2_rx_desc *rx_desc)
1518{
1519	if (port->priv->hw_version == MVPP21)
1520		return rx_desc->pp21.buf_cookie;
1521	else
1522		return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
1523}
1524
1525static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1526				    struct mvpp2_rx_desc *rx_desc)
1527{
1528	if (port->priv->hw_version == MVPP21)
1529		return rx_desc->pp21.data_size;
1530	else
1531		return rx_desc->pp22.data_size;
1532}
1533
1534static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1535				   struct mvpp2_rx_desc *rx_desc)
1536{
1537	if (port->priv->hw_version == MVPP21)
1538		return rx_desc->pp21.status;
1539	else
1540		return rx_desc->pp22.status;
1541}
1542
1543static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1544{
1545	txq_pcpu->txq_get_index++;
1546	if (txq_pcpu->txq_get_index == txq_pcpu->size)
1547		txq_pcpu->txq_get_index = 0;
1548}
1549
1550static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1551			      struct mvpp2_txq_pcpu *txq_pcpu,
1552			      struct sk_buff *skb,
1553			      struct mvpp2_tx_desc *tx_desc)
1554{
1555	struct mvpp2_txq_pcpu_buf *tx_buf =
1556		txq_pcpu->buffs + txq_pcpu->txq_put_index;
1557	tx_buf->skb = skb;
1558	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1559	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1560		mvpp2_txdesc_offset_get(port, tx_desc);
1561	txq_pcpu->txq_put_index++;
1562	if (txq_pcpu->txq_put_index == txq_pcpu->size)
1563		txq_pcpu->txq_put_index = 0;
1564}
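
/* The two helpers above implement a simple ring over txq_pcpu->buffs:
 * mvpp2_txq_inc_put() records the skb, mapped size and DMA address at the
 * put index when a descriptor is queued, mvpp2_txq_inc_get() advances the
 * get index as descriptors are released on TX completion, and both wrap to
 * zero when they reach txq_pcpu->size.
 */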
1565
1566/* Get number of physical egress port */
1567static inline int mvpp2_egress_port(struct mvpp2_port *port)
1568{
1569	return MVPP2_MAX_TCONT + port->id;
1570}
1571
1572/* Get number of physical TXQ */
1573static inline int mvpp2_txq_phys(int port, int txq)
1574{
1575	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1576}
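
/* Example of the mapping above (a sketch, assuming the driver's usual
 * limits of MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8): port 1, logical
 * txq 3 maps to physical txq (16 + 1) * 8 + 3 = 139, i.e. per-port queues
 * sit after the TCONT queues in the global physical TXQ numbering.
 */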
1577
1578/* Parser configuration routines */
1579
1580/* Update parser tcam and sram hw entries */
1581static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1582{
1583	int i;
1584
1585	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1586		return -EINVAL;
1587
1588	/* Clear entry invalidation bit */
1589	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1590
1591	/* Write tcam index - indirect access */
1592	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1593	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1594		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1595
1596	/* Write sram index - indirect access */
1597	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1598	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1599		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1600
1601	return 0;
1602}
1603
1604/* Initialize tcam entry from hw */
1605static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
1606				  struct mvpp2_prs_entry *pe, int tid)
1607{
1608	int i;
1609
1610	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1611		return -EINVAL;
1612
1613	memset(pe, 0, sizeof(*pe));
1614	pe->index = tid;
1615
1616	/* Write tcam index - indirect access */
1617	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1618
1619	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1620			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1621	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1622		return MVPP2_PRS_TCAM_ENTRY_INVALID;
1623
1624	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1625		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1626
1627	/* Write sram index - indirect access */
1628	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1629	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1630		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1631
1632	return 0;
1633}
1634
1635/* Invalidate tcam hw entry */
1636static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1637{
1638	/* Write index - indirect access */
1639	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1640	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1641		    MVPP2_PRS_TCAM_INV_MASK);
1642}
1643
1644/* Enable shadow table entry and set its lookup ID */
1645static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1646{
1647	priv->prs_shadow[index].valid = true;
1648	priv->prs_shadow[index].lu = lu;
1649}
1650
1651/* Update ri fields in shadow table entry */
1652static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1653				    unsigned int ri, unsigned int ri_mask)
1654{
1655	priv->prs_shadow[index].ri_mask = ri_mask;
1656	priv->prs_shadow[index].ri = ri;
1657}
1658
1659/* Update lookup field in tcam sw entry */
1660static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1661{
1662	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1663
1664	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1665	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1666}
1667
1668/* Update mask for single port in tcam sw entry */
1669static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1670				    unsigned int port, bool add)
1671{
1672	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1673
1674	if (add)
1675		pe->tcam.byte[enable_off] &= ~(1 << port);
1676	else
1677		pe->tcam.byte[enable_off] |= 1 << port;
1678}
1679
1680/* Update port map in tcam sw entry */
1681static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1682					unsigned int ports)
1683{
1684	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1685	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1686
1687	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1688	pe->tcam.byte[enable_off] &= ~port_mask;
1689	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1690}
1691
1692/* Obtain port map from tcam sw entry */
1693static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1694{
1695	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1696
1697	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1698}
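
/* Note on the port map encoding used by the helpers above: the TCAM enable
 * byte holds the complement of the port bitmap, so
 * mvpp2_prs_tcam_port_map_set(pe, 0) sets every enable bit and masks all
 * ports, mvpp2_prs_tcam_port_map_set(pe, MVPP2_PRS_PORT_MASK) clears them
 * so any port can match, and mvpp2_prs_tcam_port_map_get() inverts the
 * byte again to return the plain bitmap.
 */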
1699
1700/* Set byte of data and its enable bits in tcam sw entry */
1701static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1702					 unsigned int offs, unsigned char byte,
1703					 unsigned char enable)
1704{
1705	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1706	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1707}
1708
1709/* Get byte of data and its enable bits from tcam sw entry */
1710static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1711					 unsigned int offs, unsigned char *byte,
1712					 unsigned char *enable)
1713{
1714	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1715	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1716}
1717
1718/* Compare tcam data bytes with a pattern */
1719static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1720				    u16 data)
1721{
1722	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1723	u16 tcam_data;
1724
1725	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1726	if (tcam_data != data)
1727		return false;
1728	return true;
1729}
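
/* The comparison above reads the TCAM data little endian (byte[off] is the
 * low byte), which is why callers matching big-endian on-wire ethertypes
 * pass a swapped value, e.g. mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid))
 * in the VLAN lookup helpers below.
 */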
1730
1731/* Update ai bits in tcam sw entry */
1732static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1733				     unsigned int bits, unsigned int enable)
1734{
1735	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1736
1737	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1738
1739		if (!(enable & BIT(i)))
1740			continue;
1741
1742		if (bits & BIT(i))
1743			pe->tcam.byte[ai_idx] |= 1 << i;
1744		else
1745			pe->tcam.byte[ai_idx] &= ~(1 << i);
1746	}
1747
1748	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1749}
1750
1751/* Get ai bits from tcam sw entry */
1752static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1753{
1754	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1755}
1756
1757/* Set ethertype in tcam sw entry */
1758static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1759				  unsigned short ethertype)
1760{
1761	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1762	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1763}
1764
1765/* Set vid in tcam sw entry */
1766static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
1767				unsigned short vid)
1768{
1769	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
1770	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
1771}
1772
1773/* Set bits in sram sw entry */
1774static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1775				    int val)
1776{
1777	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1778}
1779
1780/* Clear bits in sram sw entry */
1781static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1782				      int val)
1783{
1784	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1785}
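
/* Sketch of the bit addressing used by the two helpers above (assuming
 * MVPP2_BIT_TO_BYTE(bit) is simply bit / 8): setting bit 37 touches
 * sram.byte[4] and ORs in 1 << (37 % 8) = 0x20.  A multi-bit field is set
 * by passing its mask in 'val', so fields that cross a byte boundary need
 * the spill handled explicitly, as mvpp2_prs_sram_offset_set() does below.
 */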
1786
1787/* Update ri bits in sram sw entry */
1788static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1789				     unsigned int bits, unsigned int mask)
1790{
1791	unsigned int i;
1792
1793	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1794		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1795
1796		if (!(mask & BIT(i)))
1797			continue;
1798
1799		if (bits & BIT(i))
1800			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1801		else
1802			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1803
1804		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1805	}
1806}
1807
1808/* Obtain ri bits from sram sw entry */
1809static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1810{
1811	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1812}
1813
1814/* Update ai bits in sram sw entry */
1815static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1816				     unsigned int bits, unsigned int mask)
1817{
1818	unsigned int i;
1819	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1820
1821	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1822
1823		if (!(mask & BIT(i)))
1824			continue;
1825
1826		if (bits & BIT(i))
1827			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1828		else
1829			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1830
1831		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1832	}
1833}
1834
1835/* Read ai bits from sram sw entry */
1836static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1837{
1838	u8 bits;
1839	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1840	int ai_en_off = ai_off + 1;
1841	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1842
1843	bits = (pe->sram.byte[ai_off] >> ai_shift) |
1844	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1845
1846	return bits;
1847}
1848
1849/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1850 * lookup iteration
1851 */
1852static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1853				       unsigned int lu)
1854{
1855	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1856
1857	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1858				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1859	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1860}
1861
1862/* In the sram sw entry set sign and value of the next lookup offset
1863 * and the offset value generated to the classifier
1864 */
1865static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1866				     unsigned int op)
1867{
1868	/* Set sign */
1869	if (shift < 0) {
1870		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1871		shift = 0 - shift;
1872	} else {
1873		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1874	}
1875
1876	/* Set value */
1877	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1878							   (unsigned char)shift;
1879
1880	/* Reset and set operation */
1881	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1882				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1883	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1884
1885	/* Set base offset as current */
1886	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1887}
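
/* The shift above is stored as sign plus magnitude: a negative shift sets
 * MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and keeps the absolute value, so the IPv6
 * multicast entry below, which passes a shift of -18, ends up with the
 * sign bit set and 18 in the shift field, moving the next lookup backwards
 * in the packet.
 */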
1888
1889/* In the sram sw entry set sign and value of the user defined offset
1890 * generated to the classifier
1891 */
1892static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1893				      unsigned int type, int offset,
1894				      unsigned int op)
1895{
1896	/* Set sign */
1897	if (offset < 0) {
1898		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1899		offset = 0 - offset;
1900	} else {
1901		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1902	}
1903
1904	/* Set value */
1905	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1906				  MVPP2_PRS_SRAM_UDF_MASK);
1907	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1908	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1909					MVPP2_PRS_SRAM_UDF_BITS)] &=
1910	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1911	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1912					MVPP2_PRS_SRAM_UDF_BITS)] |=
1913				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1914
1915	/* Set offset type */
1916	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1917				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1918	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1919
1920	/* Set offset operation */
1921	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1922				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1923	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1924
1925	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1926					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1927					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1928				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1929
1930	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1931					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1932			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1933
1934	/* Set base offset as current */
1935	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1936}
1937
1938/* Find parser flow entry */
1939static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1940{
1941	struct mvpp2_prs_entry pe;
1942	int tid;
1943
1944	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1945	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1946		u8 bits;
1947
1948		if (!priv->prs_shadow[tid].valid ||
1949		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1950			continue;
1951
1952		mvpp2_prs_init_from_hw(priv, &pe, tid);
1953		bits = mvpp2_prs_sram_ai_get(&pe);
1954
1955		/* Sram stores the classification lookup ID in AI bits [5:0] */
1956		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1957			return tid;
1958	}
1959
1960	return -ENOENT;
1961}
1962
1963/* Return first free tcam index, seeking from start to end */
1964static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1965				     unsigned char end)
1966{
1967	int tid;
1968
1969	if (start > end)
1970		swap(start, end);
1971
1972	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1973		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1974
1975	for (tid = start; tid <= end; tid++) {
1976		if (!priv->prs_shadow[tid].valid)
1977			return tid;
1978	}
1979
1980	return -EINVAL;
1981}
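
/* Because of the swap above, the scan always runs from the lower index to
 * the higher one, so callers that pass the bounds in reverse order (for
 * example mvpp2_prs_vlan_add() below) still get the lowest free TID; any
 * required ordering between entry types is enforced separately by the
 * tid/tid_aux checks in those callers.
 */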
1982
1983/* Enable/disable dropping all mac da's */
1984static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1985{
1986	struct mvpp2_prs_entry pe;
1987
1988	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1989		/* Entry exists - update port only */
1990		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
1991	} else {
1992		/* Entry doesn't exist - create new */
1993		memset(&pe, 0, sizeof(pe));
1994		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1995		pe.index = MVPP2_PE_DROP_ALL;
1996
1997		/* Non-promiscuous mode for all ports - DROP unknown packets */
1998		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1999					 MVPP2_PRS_RI_DROP_MASK);
2000
2001		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2002		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2003
2004		/* Update shadow table */
2005		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2006
2007		/* Mask all ports */
2008		mvpp2_prs_tcam_port_map_set(&pe, 0);
2009	}
2010
2011	/* Update port mask */
2012	mvpp2_prs_tcam_port_set(&pe, port, add);
2013
2014	mvpp2_prs_hw_write(priv, &pe);
2015}
2016
2017/* Set port to unicast or multicast promiscuous mode */
2018static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
2019				      enum mvpp2_prs_l2_cast l2_cast, bool add)
2020{
2021	struct mvpp2_prs_entry pe;
2022	unsigned char cast_match;
2023	unsigned int ri;
2024	int tid;
2025
2026	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
2027		cast_match = MVPP2_PRS_UCAST_VAL;
2028		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
2029		ri = MVPP2_PRS_RI_L2_UCAST;
2030	} else {
2031		cast_match = MVPP2_PRS_MCAST_VAL;
2032		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
2033		ri = MVPP2_PRS_RI_L2_MCAST;
2034	}
2035
2036	/* promiscuous mode - Accept unknown unicast or multicast packets */
2037	if (priv->prs_shadow[tid].valid) {
2038		mvpp2_prs_init_from_hw(priv, &pe, tid);
2039	} else {
2040		memset(&pe, 0, sizeof(pe));
2041		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2042		pe.index = tid;
2043
2044		/* Continue - set next lookup */
2045		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2046
2047		/* Set result info bits */
2048		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
2049
2050		/* Match UC or MC addresses */
2051		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
2052					     MVPP2_PRS_CAST_MASK);
2053
2054		/* Shift to ethertype */
2055		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2056					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2057
2058		/* Mask all ports */
2059		mvpp2_prs_tcam_port_map_set(&pe, 0);
2060
2061		/* Update shadow table */
2062		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2063	}
2064
2065	/* Update port mask */
2066	mvpp2_prs_tcam_port_set(&pe, port, add);
2067
2068	mvpp2_prs_hw_write(priv, &pe);
2069}
2070
2071/* Set entry for dsa packets */
2072static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
2073				  bool tagged, bool extend)
2074{
2075	struct mvpp2_prs_entry pe;
2076	int tid, shift;
2077
2078	if (extend) {
2079		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
2080		shift = 8;
2081	} else {
2082		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
2083		shift = 4;
2084	}
2085
2086	if (priv->prs_shadow[tid].valid) {
2087		/* Entry exists - update port only */
2088		mvpp2_prs_init_from_hw(priv, &pe, tid);
2089	} else {
2090		/* Entry doesn't exist - create new */
2091		memset(&pe, 0, sizeof(pe));
2092		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2093		pe.index = tid;
2094
2095		/* Update shadow table */
2096		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2097
2098		if (tagged) {
2099			/* Set tagged bit in DSA tag */
2100			mvpp2_prs_tcam_data_byte_set(&pe, 0,
2101					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2102					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2103
2104			/* Set ai bits for next iteration */
2105			if (extend)
2106				mvpp2_prs_sram_ai_update(&pe, 1,
2107							MVPP2_PRS_SRAM_AI_MASK);
2108			else
2109				mvpp2_prs_sram_ai_update(&pe, 0,
2110							MVPP2_PRS_SRAM_AI_MASK);
2111
2112			/* If packet is tagged, continue with vid filtering */
2113			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
2114		} else {
2115			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
2116			mvpp2_prs_sram_shift_set(&pe, shift,
2117					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2118
2119			/* Set result info bits to 'no vlans' */
2120			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2121						 MVPP2_PRS_RI_VLAN_MASK);
2122			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2123		}
2124
2125		/* Mask all ports */
2126		mvpp2_prs_tcam_port_map_set(&pe, 0);
2127	}
2128
2129	/* Update port mask */
2130	mvpp2_prs_tcam_port_set(&pe, port, add);
2131
2132	mvpp2_prs_hw_write(priv, &pe);
2133}
2134
2135/* Set entry for dsa ethertype */
2136static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
2137					    bool add, bool tagged, bool extend)
2138{
2139	struct mvpp2_prs_entry pe;
2140	int tid, shift, port_mask;
2141
2142	if (extend) {
2143		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
2144		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
2145		port_mask = 0;
2146		shift = 8;
2147	} else {
2148		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
2149		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
2150		port_mask = MVPP2_PRS_PORT_MASK;
2151		shift = 4;
2152	}
2153
2154	if (priv->prs_shadow[tid].valid) {
2155		/* Entry exists - update port only */
2156		mvpp2_prs_init_from_hw(priv, &pe, tid);
2157	} else {
2158		/* Entry doesn't exist - create new */
2159		memset(&pe, 0, sizeof(pe));
2160		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2161		pe.index = tid;
2162
2163		/* Set ethertype */
2164		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2165		mvpp2_prs_match_etype(&pe, 2, 0);
2166
2167		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2168					 MVPP2_PRS_RI_DSA_MASK);
2169		/* Shift ethertype + 2 bytes reserved + tag */
2170		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2171					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2172
2173		/* Update shadow table */
2174		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2175
2176		if (tagged) {
2177			/* Set tagged bit in DSA tag */
2178			mvpp2_prs_tcam_data_byte_set(&pe,
2179						     MVPP2_ETH_TYPE_LEN + 2 + 3,
2180						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2181						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2182			/* Clear all ai bits for next iteration */
2183			mvpp2_prs_sram_ai_update(&pe, 0,
2184						 MVPP2_PRS_SRAM_AI_MASK);
2185			/* If packet is tagged, continue checking vlans */
2186			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2187		} else {
2188			/* Set result info bits to 'no vlans' */
2189			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2190						 MVPP2_PRS_RI_VLAN_MASK);
2191			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2192		}
2193		/* Mask/unmask all ports, depending on dsa type */
2194		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2195	}
2196
2197	/* Update port mask */
2198	mvpp2_prs_tcam_port_set(&pe, port, add);
2199
2200	mvpp2_prs_hw_write(priv, &pe);
2201}
2202
2203/* Search for existing single/triple vlan entry */
2204static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
2205{
2206	struct mvpp2_prs_entry pe;
2207	int tid;
2208
2209	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
2210	for (tid = MVPP2_PE_FIRST_FREE_TID;
2211	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2212		unsigned int ri_bits, ai_bits;
2213		bool match;
2214
2215		if (!priv->prs_shadow[tid].valid ||
2216		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2217			continue;
2218
2219		mvpp2_prs_init_from_hw(priv, &pe, tid);
2220		match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
2221		if (!match)
2222			continue;
2223
2224		/* Get vlan type */
2225		ri_bits = mvpp2_prs_sram_ri_get(&pe);
2226		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2227
2228		/* Get current ai value from tcam */
2229		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
2230		/* Clear double vlan bit */
2231		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2232
2233		if (ai != ai_bits)
2234			continue;
2235
2236		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2237		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2238			return tid;
2239	}
2240
2241	return -ENOENT;
2242}
2243
2244/* Add/update single/triple vlan entry */
2245static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2246			      unsigned int port_map)
2247{
2248	struct mvpp2_prs_entry pe;
2249	int tid_aux, tid;
2250	int ret = 0;
2251
2252	memset(&pe, 0, sizeof(pe));
2253
2254	tid = mvpp2_prs_vlan_find(priv, tpid, ai);
2255
2256	if (tid < 0) {
2257		/* Create new tcam entry */
2258		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2259						MVPP2_PE_FIRST_FREE_TID);
2260		if (tid < 0)
2261			return tid;
2262
2263		/* Get last double vlan tid */
2264		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2265		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2266			unsigned int ri_bits;
2267
2268			if (!priv->prs_shadow[tid_aux].valid ||
2269			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2270				continue;
2271
2272			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2273			ri_bits = mvpp2_prs_sram_ri_get(&pe);
2274			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2275			    MVPP2_PRS_RI_VLAN_DOUBLE)
2276				break;
2277		}
2278
2279		if (tid <= tid_aux)
2280			return -EINVAL;
2281
2282		memset(&pe, 0, sizeof(pe));
2283		pe.index = tid;
2284		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2285
2286		mvpp2_prs_match_etype(&pe, 0, tpid);
2287
2288		/* VLAN tag detected, proceed with VID filtering */
2289		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
2290
2291		/* Clear all ai bits for next iteration */
2292		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2293
2294		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2295			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
2296						 MVPP2_PRS_RI_VLAN_MASK);
2297		} else {
2298			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2299			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2300						 MVPP2_PRS_RI_VLAN_MASK);
2301		}
2302		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2303
2304		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2305	} else {
2306		mvpp2_prs_init_from_hw(priv, &pe, tid);
2307	}
2308	/* Update ports' mask */
2309	mvpp2_prs_tcam_port_map_set(&pe, port_map);
2310
2311	mvpp2_prs_hw_write(priv, &pe);
2312
2313	return ret;
2314}
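
/* The 'tid <= tid_aux' check above keeps every single/triple VLAN entry at
 * a higher TCAM index than the last double VLAN entry.  Reading the code,
 * lower indices appear to take precedence in the parser lookup, so this
 * ordering is what lets a double-tagged frame hit the more specific double
 * VLAN rule before the generic single VLAN one.
 */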
2315
2316/* Get first free double vlan ai number */
2317static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2318{
2319	int i;
2320
2321	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2322		if (!priv->prs_double_vlans[i])
2323			return i;
2324	}
2325
2326	return -EINVAL;
2327}
2328
2329/* Search for existing double vlan entry */
2330static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
2331				      unsigned short tpid2)
2332{
2333	struct mvpp2_prs_entry pe;
2334	int tid;
2335
2336	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
2337	for (tid = MVPP2_PE_FIRST_FREE_TID;
2338	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2339		unsigned int ri_mask;
2340		bool match;
2341
2342		if (!priv->prs_shadow[tid].valid ||
2343		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2344			continue;
2345
2346		mvpp2_prs_init_from_hw(priv, &pe, tid);
2347
2348		match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
2349			mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
2350
2351		if (!match)
2352			continue;
2353
2354		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
2355		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2356			return tid;
2357	}
2358
2359	return -ENOENT;
2360}
2361
2362/* Add or update double vlan entry */
2363static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2364				     unsigned short tpid2,
2365				     unsigned int port_map)
2366{
2367	int tid_aux, tid, ai, ret = 0;
2368	struct mvpp2_prs_entry pe;
2369
2370	memset(&pe, 0, sizeof(pe));
2371
2372	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2373
2374	if (tid < 0) {
2375		/* Create new tcam entry */
2376		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2377				MVPP2_PE_LAST_FREE_TID);
2378		if (tid < 0)
2379			return tid;
2380
2381		/* Set ai value for new double vlan entry */
2382		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2383		if (ai < 0)
2384			return ai;
2385
2386		/* Get first single/triple vlan tid */
2387		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2388		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2389			unsigned int ri_bits;
2390
2391			if (!priv->prs_shadow[tid_aux].valid ||
2392			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2393				continue;
2394
2395			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2396			ri_bits = mvpp2_prs_sram_ri_get(&pe);
2397			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2398			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2399			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2400				break;
2401		}
2402
2403		if (tid >= tid_aux)
2404			return -ERANGE;
2405
2406		memset(&pe, 0, sizeof(pe));
2407		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2408		pe.index = tid;
2409
2410		priv->prs_double_vlans[ai] = true;
2411
2412		mvpp2_prs_match_etype(&pe, 0, tpid1);
2413		mvpp2_prs_match_etype(&pe, 4, tpid2);
2414
2415		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2416		/* Shift 4 bytes - skip outer vlan tag */
2417		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2418					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2419		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2420					 MVPP2_PRS_RI_VLAN_MASK);
2421		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2422					 MVPP2_PRS_SRAM_AI_MASK);
2423
2424		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2425	} else {
2426		mvpp2_prs_init_from_hw(priv, &pe, tid);
2427	}
2428
2429	/* Update ports' mask */
2430	mvpp2_prs_tcam_port_map_set(&pe, port_map);
2431	mvpp2_prs_hw_write(priv, &pe);
2432
2433	return ret;
2434}
2435
2436/* IPv4 header parsing for fragmentation and L4 offset */
2437static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2438			       unsigned int ri, unsigned int ri_mask)
2439{
2440	struct mvpp2_prs_entry pe;
2441	int tid;
2442
2443	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2444	    (proto != IPPROTO_IGMP))
2445		return -EINVAL;
2446
2447	/* Not fragmented packet */
2448	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2449					MVPP2_PE_LAST_FREE_TID);
2450	if (tid < 0)
2451		return tid;
2452
2453	memset(&pe, 0, sizeof(pe));
2454	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2455	pe.index = tid;
2456
2457	/* Set next lu to IPv4 */
2458	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2459	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2460	/* Set L4 offset */
2461	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2462				  sizeof(struct iphdr) - 4,
2463				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2464	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2465				 MVPP2_PRS_IPV4_DIP_AI_BIT);
2466	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2467
2468	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2469				     MVPP2_PRS_TCAM_PROTO_MASK_L);
2470	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2471				     MVPP2_PRS_TCAM_PROTO_MASK);
2472
2473	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2474	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2475	/* Unmask all ports */
2476	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2477
2478	/* Update shadow table and hw entry */
2479	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2480	mvpp2_prs_hw_write(priv, &pe);
2481
2482	/* Fragmented packet */
2483	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2484					MVPP2_PE_LAST_FREE_TID);
2485	if (tid < 0)
2486		return tid;
2487
2488	pe.index = tid;
2489	/* Clear ri before updating */
2490	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2491	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2492	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2493
2494	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2495				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2496
2497	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2498	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
2499
2500	/* Update shadow table and hw entry */
2501	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2502	mvpp2_prs_hw_write(priv, &pe);
2503
2504	return 0;
2505}
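
/* A note on the 'sizeof(struct iphdr) - 4' L4 offset used above (an
 * inference from the surrounding entries, not hardware documentation): by
 * the time the IP4 lookup runs, the earlier L2 entry has already shifted
 * the parser 4 bytes into the IPv4 header (see the "Skip eth_type + 4
 * bytes of IP header" entries in mvpp2_prs_etype_init() below), so only
 * the remaining 16 bytes of an option-less 20-byte header separate the
 * current position from the L4 header.
 */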
2506
2507/* IPv4 L3 multicast or broadcast */
2508static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2509{
2510	struct mvpp2_prs_entry pe;
2511	int mask, tid;
2512
2513	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2514					MVPP2_PE_LAST_FREE_TID);
2515	if (tid < 0)
2516		return tid;
2517
2518	memset(&pe, 0, sizeof(pe));
2519	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2520	pe.index = tid;
2521
2522	switch (l3_cast) {
2523	case MVPP2_PRS_L3_MULTI_CAST:
2524		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2525					     MVPP2_PRS_IPV4_MC_MASK);
2526		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2527					 MVPP2_PRS_RI_L3_ADDR_MASK);
2528		break;
2529	case MVPP2_PRS_L3_BROAD_CAST:
2530		mask = MVPP2_PRS_IPV4_BC_MASK;
2531		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2532		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2533		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2534		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2535		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2536					 MVPP2_PRS_RI_L3_ADDR_MASK);
2537		break;
2538	default:
2539		return -EINVAL;
2540	}
2541
2542	/* Finished: go to flowid generation */
2543	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2544	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2545
2546	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2547				 MVPP2_PRS_IPV4_DIP_AI_BIT);
2548	/* Unmask all ports */
2549	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2550
2551	/* Update shadow table and hw entry */
2552	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2553	mvpp2_prs_hw_write(priv, &pe);
2554
2555	return 0;
2556}
2557
2558/* Set entries for protocols over IPv6 */
2559static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2560			       unsigned int ri, unsigned int ri_mask)
2561{
2562	struct mvpp2_prs_entry pe;
2563	int tid;
2564
2565	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2566	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2567		return -EINVAL;
2568
2569	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2570					MVPP2_PE_LAST_FREE_TID);
2571	if (tid < 0)
2572		return tid;
2573
2574	memset(&pe, 0, sizeof(pe));
2575	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2576	pe.index = tid;
2577
2578	/* Finished: go to flowid generation */
2579	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2580	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2581	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2582	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2583				  sizeof(struct ipv6hdr) - 6,
2584				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2585
2586	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2587	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2588				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2589	/* Unmask all ports */
2590	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2591
2592	/* Write HW */
2593	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2594	mvpp2_prs_hw_write(priv, &pe);
2595
2596	return 0;
2597}
2598
2599/* IPv6 L3 multicast entry */
2600static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2601{
2602	struct mvpp2_prs_entry pe;
2603	int tid;
2604
2605	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2606		return -EINVAL;
2607
2608	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2609					MVPP2_PE_LAST_FREE_TID);
2610	if (tid < 0)
2611		return tid;
2612
2613	memset(&pe, 0, sizeof(pe));
2614	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2615	pe.index = tid;
2616
2617	/* Finished: go to flowid generation */
2618	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2619	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2620				 MVPP2_PRS_RI_L3_ADDR_MASK);
2621	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2622				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2623	/* Shift back to IPv6 NH */
2624	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2625
2626	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2627				     MVPP2_PRS_IPV6_MC_MASK);
2628	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2629	/* Unmask all ports */
2630	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2631
2632	/* Update shadow table and hw entry */
2633	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2634	mvpp2_prs_hw_write(priv, &pe);
2635
2636	return 0;
2637}
2638
2639/* Parser per-port initialization */
2640static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2641				   int lu_max, int offset)
2642{
2643	u32 val;
2644
2645	/* Set lookup ID */
2646	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2647	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2648	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2649	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2650
2651	/* Set maximum number of loops for packet received from port */
2652	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2653	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2654	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2655	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2656
2657	/* Set initial offset for packet header extraction for the first
2658	 * searching loop
2659	 */
2660	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2661	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2662	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2663	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2664}
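
/* A typical call of the helper above, shown only as an illustration of how
 * a port would be wired up (the actual caller is outside this excerpt):
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 *
 * i.e. each port starts its first lookup at the Marvell Header stage, with
 * the maximum loop count and a zero initial header-extraction offset.
 */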
2665
2666/* Default flow entries initialization for all ports */
2667static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2668{
2669	struct mvpp2_prs_entry pe;
2670	int port;
2671
2672	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2673		memset(&pe, 0, sizeof(pe));
2674		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2675		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2676
2677		/* Mask all ports */
2678		mvpp2_prs_tcam_port_map_set(&pe, 0);
2679
2680		/* Set flow ID */
2681		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2682		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2683
2684		/* Update shadow table and hw entry */
2685		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2686		mvpp2_prs_hw_write(priv, &pe);
2687	}
2688}
2689
2690/* Set default entry for Marvell Header field */
2691static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2692{
2693	struct mvpp2_prs_entry pe;
2694
2695	memset(&pe, 0, sizeof(pe));
2696
2697	pe.index = MVPP2_PE_MH_DEFAULT;
2698	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2699	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2700				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2701	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2702
2703	/* Unmask all ports */
2704	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2705
2706	/* Update shadow table and hw entry */
2707	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2708	mvpp2_prs_hw_write(priv, &pe);
2709}
2710
2711/* Set default entries (placeholder) for promiscuous, non-promiscuous and
2712 * multicast MAC addresses
2713 */
2714static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2715{
2716	struct mvpp2_prs_entry pe;
2717
2718	memset(&pe, 0, sizeof(pe));
2719
2720	/* Non-promiscuous mode for all ports - DROP unknown packets */
2721	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2722	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2723
2724	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2725				 MVPP2_PRS_RI_DROP_MASK);
2726	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2727	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2728
2729	/* Unmask all ports */
2730	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2731
2732	/* Update shadow table and hw entry */
2733	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2734	mvpp2_prs_hw_write(priv, &pe);
2735
2736	/* Create dummy entries for drop all and promiscuous modes */
2737	mvpp2_prs_mac_drop_all_set(priv, 0, false);
2738	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
2739	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
2740}
2741
2742/* Set default entries for various types of dsa packets */
2743static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2744{
2745	struct mvpp2_prs_entry pe;
2746
2747	/* Non-tagged EDSA entry - placeholder */
2748	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2749			      MVPP2_PRS_EDSA);
2750
2751	/* Tagged EDSA entry - placeholder */
2752	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2753
2754	/* Non-tagged DSA entry - placeholder */
2755	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2756			      MVPP2_PRS_DSA);
2757
2758	/* Tagged DSA entry - placeholder */
2759	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2760
2761	/* Non-tagged EDSA ethertype entry - placeholder */
2762	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2763					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2764
2765	/* Tagged EDSA ethertype entry - placeholder */
2766	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2767					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2768
2769	/* Non-tagged DSA ethertype entry */
2770	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2771					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2772
2773	/* Tagged DSA ethertype entry */
2774	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2775					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2776
2777	/* Set default entry, in case DSA or EDSA tag not found */
2778	memset(&pe, 0, sizeof(pe));
2779	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2780	pe.index = MVPP2_PE_DSA_DEFAULT;
2781	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2782
2783	/* Shift 0 bytes */
2784	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2785	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2786
2787	/* Clear all sram ai bits for next iteration */
2788	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2789
2790	/* Unmask all ports */
2791	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2792
2793	mvpp2_prs_hw_write(priv, &pe);
2794}
2795
2796/* Initialize parser entries for VID filtering */
2797static void mvpp2_prs_vid_init(struct mvpp2 *priv)
2798{
2799	struct mvpp2_prs_entry pe;
2800
2801	memset(&pe, 0, sizeof(pe));
2802
2803	/* Set default vid entry */
2804	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
2805	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2806
2807	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
2808
2809	/* Skip VLAN header - Set offset to 4 bytes */
2810	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2811				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2812
2813	/* Clear all ai bits for next iteration */
2814	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2815
2816	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2817
2818	/* Unmask all ports */
2819	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2820
2821	/* Update shadow table and hw entry */
2822	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2823	mvpp2_prs_hw_write(priv, &pe);
2824
2825	/* Set default vid entry for extended DSA */
2826	memset(&pe, 0, sizeof(pe));
2827
2828	/* Set default vid entry */
2829	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
2830	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2831
2832	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
2833				 MVPP2_PRS_EDSA_VID_AI_BIT);
2834
2835	/* Skip VLAN header - Set offset to 8 bytes */
2836	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
2837				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2838
2839	/* Clear all ai bits for next iteration */
2840	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2841
2842	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2843
2844	/* Unmask all ports */
2845	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2846
2847	/* Update shadow table and hw entry */
2848	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2849	mvpp2_prs_hw_write(priv, &pe);
2850}
2851
2852/* Match basic ethertypes */
2853static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2854{
2855	struct mvpp2_prs_entry pe;
2856	int tid;
2857
2858	/* Ethertype: PPPoE */
2859	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2860					MVPP2_PE_LAST_FREE_TID);
2861	if (tid < 0)
2862		return tid;
2863
2864	memset(&pe, 0, sizeof(pe));
2865	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2866	pe.index = tid;
2867
2868	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2869
2870	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2871				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2872	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2873	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2874				 MVPP2_PRS_RI_PPPOE_MASK);
2875
2876	/* Update shadow table and hw entry */
2877	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2878	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2879	priv->prs_shadow[pe.index].finish = false;
2880	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2881				MVPP2_PRS_RI_PPPOE_MASK);
2882	mvpp2_prs_hw_write(priv, &pe);
2883
2884	/* Ethertype: ARP */
2885	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2886					MVPP2_PE_LAST_FREE_TID);
2887	if (tid < 0)
2888		return tid;
2889
2890	memset(&pe, 0, sizeof(pe));
2891	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2892	pe.index = tid;
2893
2894	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2895
2896	/* Generate flow in the next iteration */
2897	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2898	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2899	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2900				 MVPP2_PRS_RI_L3_PROTO_MASK);
2901	/* Set L3 offset */
2902	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2903				  MVPP2_ETH_TYPE_LEN,
2904				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2905
2906	/* Update shadow table and hw entry */
2907	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2908	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2909	priv->prs_shadow[pe.index].finish = true;
2910	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2911				MVPP2_PRS_RI_L3_PROTO_MASK);
2912	mvpp2_prs_hw_write(priv, &pe);
2913
2914	/* Ethertype: LBTD */
2915	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2916					MVPP2_PE_LAST_FREE_TID);
2917	if (tid < 0)
2918		return tid;
2919
2920	memset(&pe, 0, sizeof(pe));
2921	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2922	pe.index = tid;
2923
2924	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2925
2926	/* Generate flow in the next iteration */
2927	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2928	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2929	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2930				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2931				 MVPP2_PRS_RI_CPU_CODE_MASK |
2932				 MVPP2_PRS_RI_UDF3_MASK);
2933	/* Set L3 offset */
2934	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2935				  MVPP2_ETH_TYPE_LEN,
2936				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2937
2938	/* Update shadow table and hw entry */
2939	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2940	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2941	priv->prs_shadow[pe.index].finish = true;
2942	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2943				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2944				MVPP2_PRS_RI_CPU_CODE_MASK |
2945				MVPP2_PRS_RI_UDF3_MASK);
2946	mvpp2_prs_hw_write(priv, &pe);
2947
2948	/* Ethertype: IPv4 without options */
2949	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2950					MVPP2_PE_LAST_FREE_TID);
2951	if (tid < 0)
2952		return tid;
2953
2954	memset(&pe, 0, sizeof(pe));
2955	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2956	pe.index = tid;
2957
2958	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2959	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2960				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2961				     MVPP2_PRS_IPV4_HEAD_MASK |
2962				     MVPP2_PRS_IPV4_IHL_MASK);
2963
2964	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2965	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2966				 MVPP2_PRS_RI_L3_PROTO_MASK);
2967	/* Skip eth_type + 4 bytes of IP header */
2968	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2969				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2970	/* Set L3 offset */
2971	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2972				  MVPP2_ETH_TYPE_LEN,
2973				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2974
2975	/* Update shadow table and hw entry */
2976	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2977	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2978	priv->prs_shadow[pe.index].finish = false;
2979	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2980				MVPP2_PRS_RI_L3_PROTO_MASK);
2981	mvpp2_prs_hw_write(priv, &pe);
2982
2983	/* Ethertype: IPv4 with options */
2984	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2985					MVPP2_PE_LAST_FREE_TID);
2986	if (tid < 0)
2987		return tid;
2988
2989	pe.index = tid;
2990
2991	/* Clear tcam data before updating */
2992	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2993	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2994
2995	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2996				     MVPP2_PRS_IPV4_HEAD,
2997				     MVPP2_PRS_IPV4_HEAD_MASK);
2998
2999	/* Clear ri before updating */
3000	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3001	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3002	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3003				 MVPP2_PRS_RI_L3_PROTO_MASK);
3004
3005	/* Update shadow table and hw entry */
3006	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3007	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3008	priv->prs_shadow[pe.index].finish = false;
3009	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
3010				MVPP2_PRS_RI_L3_PROTO_MASK);
3011	mvpp2_prs_hw_write(priv, &pe);
3012
3013	/* Ethertype: IPv6 without options */
3014	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3015					MVPP2_PE_LAST_FREE_TID);
3016	if (tid < 0)
3017		return tid;
3018
3019	memset(&pe, 0, sizeof(pe));
3020	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3021	pe.index = tid;
3022
3023	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
3024
3025	/* Skip DIP of IPV6 header */
3026	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
3027				 MVPP2_MAX_L3_ADDR_SIZE,
3028				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3029	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3030	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3031				 MVPP2_PRS_RI_L3_PROTO_MASK);
3032	/* Set L3 offset */
3033	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3034				  MVPP2_ETH_TYPE_LEN,
3035				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3036
3037	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3038	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3039	priv->prs_shadow[pe.index].finish = false;
3040	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
3041				MVPP2_PRS_RI_L3_PROTO_MASK);
3042	mvpp2_prs_hw_write(priv, &pe);
3043
3044	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
3045	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3046	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3047	pe.index = MVPP2_PE_ETH_TYPE_UN;
3048
3049	/* Unmask all ports */
3050	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3051
3052	/* Generate flow in the next iteration */
3053	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3054	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3055	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3056				 MVPP2_PRS_RI_L3_PROTO_MASK);
3057	/* Set L3 offset even if it's unknown L3 */
3058	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3059				  MVPP2_ETH_TYPE_LEN,
3060				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3061
3062	/* Update shadow table and hw entry */
3063	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3064	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3065	priv->prs_shadow[pe.index].finish = true;
3066	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
3067				MVPP2_PRS_RI_L3_PROTO_MASK);
3068	mvpp2_prs_hw_write(priv, &pe);
3069
3070	return 0;
3071}
3072
3073/* Configure vlan entries and detect up to 2 successive VLAN tags.
3074 * Possible options:
3075 * 0x8100, 0x88A8
3076 * 0x8100, 0x8100
3077 * 0x8100
3078 * 0x88A8
3079 */
3080static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
3081{
3082	struct mvpp2_prs_entry pe;
3083	int err;
3084
3085	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
3086					      MVPP2_PRS_DBL_VLANS_MAX,
3087					      sizeof(bool), GFP_KERNEL);
3088	if (!priv->prs_double_vlans)
3089		return -ENOMEM;
3090
3091	/* Double VLAN: 0x8100, 0x88A8 */
3092	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
3093					MVPP2_PRS_PORT_MASK);
3094	if (err)
3095		return err;
3096
3097	/* Double VLAN: 0x8100, 0x8100 */
3098	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
3099					MVPP2_PRS_PORT_MASK);
3100	if (err)
3101		return err;
3102
3103	/* Single VLAN: 0x88a8 */
3104	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
3105				 MVPP2_PRS_PORT_MASK);
3106	if (err)
3107		return err;
3108
3109	/* Single VLAN: 0x8100 */
3110	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
3111				 MVPP2_PRS_PORT_MASK);
3112	if (err)
3113		return err;
3114
3115	/* Set default double vlan entry */
3116	memset(&pe, 0, sizeof(pe));
3117	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3118	pe.index = MVPP2_PE_VLAN_DBL;
3119
3120	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
3121
3122	/* Clear ai for next iterations */
3123	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3124	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
3125				 MVPP2_PRS_RI_VLAN_MASK);
3126
3127	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
3128				 MVPP2_PRS_DBL_VLAN_AI_BIT);
3129	/* Unmask all ports */
3130	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3131
3132	/* Update shadow table and hw entry */
3133	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3134	mvpp2_prs_hw_write(priv, &pe);
3135
3136	/* Set default vlan none entry */
3137	memset(&pe, 0, sizeof(pe));
3138	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3139	pe.index = MVPP2_PE_VLAN_NONE;
3140
3141	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3142	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
3143				 MVPP2_PRS_RI_VLAN_MASK);
3144
3145	/* Unmask all ports */
3146	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3147
3148	/* Update shadow table and hw entry */
3149	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3150	mvpp2_prs_hw_write(priv, &pe);
3151
3152	return 0;
3153}
3154
3155/* Set entries for PPPoE ethertype */
3156static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
3157{
3158	struct mvpp2_prs_entry pe;
3159	int tid;
3160
3161	/* IPv4 over PPPoE with options */
3162	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3163					MVPP2_PE_LAST_FREE_TID);
3164	if (tid < 0)
3165		return tid;
3166
3167	memset(&pe, 0, sizeof(pe));
3168	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3169	pe.index = tid;
3170
3171	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
3172
3173	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3174	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3175				 MVPP2_PRS_RI_L3_PROTO_MASK);
3176	/* Skip eth_type + 4 bytes of IP header */
3177	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3178				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3179	/* Set L3 offset */
3180	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3181				  MVPP2_ETH_TYPE_LEN,
3182				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3183
3184	/* Update shadow table and hw entry */
3185	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3186	mvpp2_prs_hw_write(priv, &pe);
3187
3188	/* IPv4 over PPPoE without options */
3189	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3190					MVPP2_PE_LAST_FREE_TID);
3191	if (tid < 0)
3192		return tid;
3193
3194	pe.index = tid;
3195
3196	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3197				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
3198				     MVPP2_PRS_IPV4_HEAD_MASK |
3199				     MVPP2_PRS_IPV4_IHL_MASK);
3200
3201	/* Clear ri before updating */
3202	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3203	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3204	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
3205				 MVPP2_PRS_RI_L3_PROTO_MASK);
3206
3207	/* Update shadow table and hw entry */
3208	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3209	mvpp2_prs_hw_write(priv, &pe);
3210
3211	/* IPv6 over PPPoE */
3212	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3213					MVPP2_PE_LAST_FREE_TID);
3214	if (tid < 0)
3215		return tid;
3216
3217	memset(&pe, 0, sizeof(pe));
3218	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3219	pe.index = tid;
3220
3221	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3222
3223	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3224	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3225				 MVPP2_PRS_RI_L3_PROTO_MASK);
3226	/* Skip eth_type + 4 bytes of IPv6 header */
3227	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3228				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3229	/* Set L3 offset */
3230	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3231				  MVPP2_ETH_TYPE_LEN,
3232				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3233
3234	/* Update shadow table and hw entry */
3235	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3236	mvpp2_prs_hw_write(priv, &pe);
3237
3238	/* Non-IP over PPPoE */
3239	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3240					MVPP2_PE_LAST_FREE_TID);
3241	if (tid < 0)
3242		return tid;
3243
3244	memset(&pe, 0, sizeof(pe));
3245	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3246	pe.index = tid;
3247
3248	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3249				 MVPP2_PRS_RI_L3_PROTO_MASK);
3250
3251	/* Finished: go to flowid generation */
3252	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3253	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3254	/* Set L3 offset even if it's unknown L3 */
3255	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3256				  MVPP2_ETH_TYPE_LEN,
3257				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3258
3259	/* Update shadow table and hw entry */
3260	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3261	mvpp2_prs_hw_write(priv, &pe);
3262
3263	return 0;
3264}
3265
3266/* Initialize entries for IPv4 */
3267static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3268{
3269	struct mvpp2_prs_entry pe;
3270	int err;
3271
3272	/* Set entries for TCP, UDP and IGMP over IPv4 */
3273	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3274				  MVPP2_PRS_RI_L4_PROTO_MASK);
3275	if (err)
3276		return err;
3277
3278	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3279				  MVPP2_PRS_RI_L4_PROTO_MASK);
3280	if (err)
3281		return err;
3282
3283	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3284				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3285				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3286				  MVPP2_PRS_RI_CPU_CODE_MASK |
3287				  MVPP2_PRS_RI_UDF3_MASK);
3288	if (err)
3289		return err;
3290
3291	/* IPv4 Broadcast */
3292	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3293	if (err)
3294		return err;
3295
3296	/* IPv4 Multicast */
3297	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3298	if (err)
3299		return err;
3300
3301	/* Default IPv4 entry for unknown protocols */
3302	memset(&pe, 0, sizeof(pe));
3303	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3304	pe.index = MVPP2_PE_IP4_PROTO_UN;
3305
3306	/* Set next lu to IPv4 */
3307	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3308	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3309	/* Set L4 offset */
3310	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3311				  sizeof(struct iphdr) - 4,
3312				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3313	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3314				 MVPP2_PRS_IPV4_DIP_AI_BIT);
3315	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3316				 MVPP2_PRS_RI_L4_PROTO_MASK);
3317
3318	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3319	/* Unmask all ports */
3320	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3321
3322	/* Update shadow table and hw entry */
3323	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3324	mvpp2_prs_hw_write(priv, &pe);
3325
3326	/* Default IPv4 entry for unicast address */
3327	memset(&pe, 0, sizeof(pe));
3328	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3329	pe.index = MVPP2_PE_IP4_ADDR_UN;
3330
3331	/* Finished: go to flowid generation */
3332	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3333	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3334	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3335				 MVPP2_PRS_RI_L3_ADDR_MASK);
3336
3337	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3338				 MVPP2_PRS_IPV4_DIP_AI_BIT);
3339	/* Unmask all ports */
3340	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3341
3342	/* Update shadow table and hw entry */
3343	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3344	mvpp2_prs_hw_write(priv, &pe);
3345
3346	return 0;
3347}
3348
3349/* Initialize entries for IPv6 */
3350static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3351{
3352	struct mvpp2_prs_entry pe;
3353	int tid, err;
3354
3355	/* Set entries for TCP, UDP and ICMP over IPv6 */
3356	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3357				  MVPP2_PRS_RI_L4_TCP,
3358				  MVPP2_PRS_RI_L4_PROTO_MASK);
3359	if (err)
3360		return err;
3361
3362	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3363				  MVPP2_PRS_RI_L4_UDP,
3364				  MVPP2_PRS_RI_L4_PROTO_MASK);
3365	if (err)
3366		return err;
3367
3368	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3369				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3370				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3371				  MVPP2_PRS_RI_CPU_CODE_MASK |
3372				  MVPP2_PRS_RI_UDF3_MASK);
3373	if (err)
3374		return err;
3375
3376	/* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
3377	/* Result Info: UDF7=1, DS lite */
3378	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3379				  MVPP2_PRS_RI_UDF7_IP6_LITE,
3380				  MVPP2_PRS_RI_UDF7_MASK);
3381	if (err)
3382		return err;
3383
3384	/* IPv6 multicast */
3385	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3386	if (err)
3387		return err;
3388
3389	/* Entry for checking hop limit */
3390	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3391					MVPP2_PE_LAST_FREE_TID);
3392	if (tid < 0)
3393		return tid;
3394
3395	memset(&pe, 0, sizeof(pe));
3396	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3397	pe.index = tid;
3398
3399	/* Finished: go to flowid generation */
3400	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3401	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3402	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3403				 MVPP2_PRS_RI_DROP_MASK,
3404				 MVPP2_PRS_RI_L3_PROTO_MASK |
3405				 MVPP2_PRS_RI_DROP_MASK);
3406
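	/* Match a hop limit of zero; together with the drop bit in the
	 * result info above, such packets are discarded.
	 */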
3407	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3408	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3409				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3410
3411	/* Update shadow table and hw entry */
3412	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3413	mvpp2_prs_hw_write(priv, &pe);
3414
3415	/* Default IPv6 entry for unknown protocols */
3416	memset(&pe, 0, sizeof(pe));
3417	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3418	pe.index = MVPP2_PE_IP6_PROTO_UN;
3419
3420	/* Finished: go to flowid generation */
3421	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3422	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3423	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3424				 MVPP2_PRS_RI_L4_PROTO_MASK);
3425	/* Set L4 offset relative to our current place */
3426	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3427				  sizeof(struct ipv6hdr) - 4,
3428				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3429
3430	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3431				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3432	/* Unmask all ports */
3433	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3434
3435	/* Update shadow table and hw entry */
3436	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3437	mvpp2_prs_hw_write(priv, &pe);
3438
3439	/* Default IPv6 entry for unknown ext protocols */
3440	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3441	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3442	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3443
3444	/* Finished: go to flowid generation */
3445	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3446	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3447	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3448				 MVPP2_PRS_RI_L4_PROTO_MASK);
3449
3450	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3451				 MVPP2_PRS_IPV6_EXT_AI_BIT);
3452	/* Unmask all ports */
3453	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3454
3455	/* Update shadow table and hw entry */
3456	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3457	mvpp2_prs_hw_write(priv, &pe);
3458
3459	/* Default IPv6 entry for unicast address */
3460	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3461	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3462	pe.index = MVPP2_PE_IP6_ADDR_UN;
3463
3464	/* Finished: go to IPv6 again */
3465	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3466	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3467				 MVPP2_PRS_RI_L3_ADDR_MASK);
3468	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3469				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3470	/* Shift back to IPV6 NH */
3471	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3472
3473	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3474	/* Unmask all ports */
3475	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3476
3477	/* Update shadow table and hw entry */
3478	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3479	mvpp2_prs_hw_write(priv, &pe);
3480
3481	return 0;
3482}
3483
3484/* Find tcam entry with matched pair <vid,port> */
3485static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
3486				    u16 mask)
3487{
3488	unsigned char byte[2], enable[2];
3489	struct mvpp2_prs_entry pe;
3490	u16 rvid, rmask;
3491	int tid;
3492
3493	/* Go through all entries with MVPP2_PRS_LU_VID */
3494	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
3495	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
3496		if (!priv->prs_shadow[tid].valid ||
3497		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
3498			continue;
3499
3500		mvpp2_prs_init_from_hw(priv, &pe, tid);
3501
3502		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
3503		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
3504
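		/* The 12-bit VID is split across TCAM data bytes 2 and 3:
		 * upper four bits in byte 2, lower eight bits in byte 3.
		 */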
3505		rvid = ((byte[0] & 0xf) << 8) + byte[1];
3506		rmask = ((enable[0] & 0xf) << 8) + enable[1];
3507
3508		if (rvid != vid || rmask != mask)
3509			continue;
3510
3511		return tid;
3512	}
3513
3514	return -ENOENT;
3515}
3516
3517/* Write parser entry for VID filtering */
3518static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
3519{
3520	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
3521				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
3522	unsigned int mask = 0xfff, reg_val, shift;
3523	struct mvpp2 *priv = port->priv;
3524	struct mvpp2_prs_entry pe;
3525	int tid;
3526
3527	memset(&pe, 0, sizeof(pe));
3528
3529	/* Scan TCAM and see if an entry with this <vid,port> already exists */
3530	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
3531
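	/* The MH register tells whether this port uses extended DSA tags,
	 * which determines how many bytes to skip past the VLAN tag.
	 */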
3532	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3533	if (reg_val & MVPP2_DSA_EXTENDED)
3534		shift = MVPP2_VLAN_TAG_EDSA_LEN;
3535	else
3536		shift = MVPP2_VLAN_TAG_LEN;
3537
3538	/* No such entry */
3539	if (tid < 0) {
3541		/* Go through all entries from first to last in vlan range */
3542		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
3543						vid_start +
3544						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
3545
3546		/* There isn't room for a new VID filter */
3547		if (tid < 0)
3548			return tid;
3549
3550		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3551		pe.index = tid;
3552
3553		/* Mask all ports */
3554		mvpp2_prs_tcam_port_map_set(&pe, 0);
3555	} else {
3556		mvpp2_prs_init_from_hw(priv, &pe, tid);
3557	}
3558
3559	/* Enable the current port */
3560	mvpp2_prs_tcam_port_set(&pe, port->id, true);
3561
3562	/* Continue - set next lookup */
3563	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3564
3565	/* Skip VLAN header - Set offset to 4 or 8 bytes */
3566	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3567
3568	/* Set match on VID */
3569	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
3570
3571	/* Clear all ai bits for next iteration */
3572	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3573
3574	/* Update shadow table */
3575	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3576	mvpp2_prs_hw_write(priv, &pe);
3577
3578	return 0;
3579}
3580
3581/* Remove parser entry for VID filtering */
3582static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
3583{
3584	struct mvpp2 *priv = port->priv;
3585	int tid;
3586
3587	/* Scan TCAM and see if an entry with this <vid,port> already exists */
3588	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
3589
3590	/* No such entry */
3591	if (tid < 0)
3592		return;
3593
3594	mvpp2_prs_hw_inv(priv, tid);
3595	priv->prs_shadow[tid].valid = false;
3596}
3597
3598/* Remove all existing VID filters on this port */
3599static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
3600{
3601	struct mvpp2 *priv = port->priv;
3602	int tid;
3603
3604	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
3605	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
3606		if (priv->prs_shadow[tid].valid) {
3607			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
3608	}
3609}
3610
3611/* Remove the VID filtering guard entry for this port */
3612static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
3613{
3614	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3615	struct mvpp2 *priv = port->priv;
3616
3617	/* Invalidate the guard entry */
3618	mvpp2_prs_hw_inv(priv, tid);
3619
3620	priv->prs_shadow[tid].valid = false;
3621}
3622
3623/* Add guard entry that drops packets when no VID is matched on this port */
3624static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
3625{
3626	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3627	struct mvpp2 *priv = port->priv;
3628	unsigned int reg_val, shift;
3629	struct mvpp2_prs_entry pe;
3630
3631	if (priv->prs_shadow[tid].valid)
3632		return;
3633
3634	memset(&pe, 0, sizeof(pe));
3635
3636	pe.index = tid;
3637
3638	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3639	if (reg_val & MVPP2_DSA_EXTENDED)
3640		shift = MVPP2_VLAN_TAG_EDSA_LEN;
3641	else
3642		shift = MVPP2_VLAN_TAG_LEN;
3643
3644	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3645
3646	/* Mask all ports */
3647	mvpp2_prs_tcam_port_map_set(&pe, 0);
3648
3649	/* Update port mask */
3650	mvpp2_prs_tcam_port_set(&pe, port->id, true);
3651
3652	/* Continue - set next lookup */
3653	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3654
3655	/* Skip VLAN header - Set offset to 4 or 8 bytes */
3656	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3657
3658	/* Drop VLAN packets that don't belong to any VIDs on this port */
3659	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
3660				 MVPP2_PRS_RI_DROP_MASK);
3661
3662	/* Clear all ai bits for next iteration */
3663	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3664
3665	/* Update shadow table */
3666	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3667	mvpp2_prs_hw_write(priv, &pe);
3668}
3669
3670/* Parser default initialization */
3671static int mvpp2_prs_default_init(struct platform_device *pdev,
3672				  struct mvpp2 *priv)
3673{
3674	int err, index, i;
3675
3676	/* Enable tcam table */
3677	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3678
3679	/* Clear all tcam and sram entries */
3680	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3681		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3682		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3683			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3684
3685		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3686		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3687			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3688	}
3689
3690	/* Invalidate all tcam entries */
3691	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3692		mvpp2_prs_hw_inv(priv, index);
3693
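	/* Allocate the software shadow of the TCAM, used to track which
	 * entries are valid and which lookup they belong to.
	 */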
3694	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3695					sizeof(*priv->prs_shadow),
3696					GFP_KERNEL);
3697	if (!priv->prs_shadow)
3698		return -ENOMEM;
3699
3700	/* Always start from lookup = 0 */
3701	for (index = 0; index < MVPP2_MAX_PORTS; index++)
3702		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3703				       MVPP2_PRS_PORT_LU_MAX, 0);
3704
3705	mvpp2_prs_def_flow_init(priv);
3706
3707	mvpp2_prs_mh_init(priv);
3708
3709	mvpp2_prs_mac_init(priv);
3710
3711	mvpp2_prs_dsa_init(priv);
3712
3713	mvpp2_prs_vid_init(priv);
3714
3715	err = mvpp2_prs_etype_init(priv);
3716	if (err)
3717		return err;
3718
3719	err = mvpp2_prs_vlan_init(pdev, priv);
3720	if (err)
3721		return err;
3722
3723	err = mvpp2_prs_pppoe_init(priv);
3724	if (err)
3725		return err;
3726
3727	err = mvpp2_prs_ip6_init(priv);
3728	if (err)
3729		return err;
3730
3731	err = mvpp2_prs_ip4_init(priv);
3732	if (err)
3733		return err;
3734
3735	return 0;
3736}
3737
3738/* Compare MAC DA with tcam entry data */
3739static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3740				       const u8 *da, unsigned char *mask)
3741{
3742	unsigned char tcam_byte, tcam_mask;
3743	int index;
3744
3745	for (index = 0; index < ETH_ALEN; index++) {
3746		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3747		if (tcam_mask != mask[index])
3748			return false;
3749
3750		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3751			return false;
3752	}
3753
3754	return true;
3755}
3756
3757/* Find tcam entry with matched pair <MAC DA, port> */
3758static int
3759mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3760			    unsigned char *mask, int udf_type)
3761{
3762	struct mvpp2_prs_entry pe;
3763	int tid;
3764
3765	/* Go through all entries with MVPP2_PRS_LU_MAC */
3766	for (tid = MVPP2_PE_MAC_RANGE_START;
3767	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
3768		unsigned int entry_pmap;
3769
3770		if (!priv->prs_shadow[tid].valid ||
3771		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3772		    (priv->prs_shadow[tid].udf != udf_type))
3773			continue;
3774
3775		mvpp2_prs_init_from_hw(priv, &pe, tid);
3776		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
3777
3778		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
3779		    entry_pmap == pmap)
3780			return tid;
3781	}
3782
3783	return -ENOENT;
3784}
3785
3786/* Update parser's MAC DA entry */
3787static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
3788				   bool add)
3789{
3790	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3791	struct mvpp2 *priv = port->priv;
3792	unsigned int pmap, len, ri;
3793	struct mvpp2_prs_entry pe;
3794	int tid;
3795
3796	memset(&pe, 0, sizeof(pe));
3797
3798	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3799	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
3800					  MVPP2_PRS_UDF_MAC_DEF);
3801
3802	/* No such entry */
3803	if (tid < 0) {
3804		if (!add)
3805			return 0;
3806
3807		/* Create new TCAM entry */
3808		/* Go through all entries from first to last */
3809		tid = mvpp2_prs_tcam_first_free(priv,
3810						MVPP2_PE_MAC_RANGE_START,
3811						MVPP2_PE_MAC_RANGE_END);
3812		if (tid < 0)
3813			return tid;
3814
3815		pe.index = tid;
3816
3817		/* Mask all ports */
3818		mvpp2_prs_tcam_port_map_set(&pe, 0);
3819	} else {
3820		mvpp2_prs_init_from_hw(priv, &pe, tid);
3821	}
3822
3823	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
3824
3825	/* Update port mask */
3826	mvpp2_prs_tcam_port_set(&pe, port->id, add);
3827
3828	/* Invalidate the entry if no ports are left enabled */
3829	pmap = mvpp2_prs_tcam_port_map_get(&pe);
3830	if (pmap == 0) {
3831		if (add)
3832			return -EINVAL;
3833
3834		mvpp2_prs_hw_inv(priv, pe.index);
3835		priv->prs_shadow[pe.index].valid = false;
3836		return 0;
3837	}
3838
3839	/* Continue - set next lookup */
3840	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
3841
3842	/* Set match on DA */
3843	len = ETH_ALEN;
3844	while (len--)
3845		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
3846
3847	/* Set result info bits */
3848	if (is_broadcast_ether_addr(da)) {
3849		ri = MVPP2_PRS_RI_L2_BCAST;
3850	} else if (is_multicast_ether_addr(da)) {
3851		ri = MVPP2_PRS_RI_L2_MCAST;
3852	} else {
3853		ri = MVPP2_PRS_RI_L2_UCAST;
3854
3855		if (ether_addr_equal(da, port->dev->dev_addr))
3856			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
3857	}
3858
3859	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3860				 MVPP2_PRS_RI_MAC_ME_MASK);
3861	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3862				MVPP2_PRS_RI_MAC_ME_MASK);
3863
3864	/* Shift to ethertype */
3865	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
3866				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3867
3868	/* Update shadow table and hw entry */
3869	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
3870	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
3871	mvpp2_prs_hw_write(priv, &pe);
3872
3873	return 0;
3874}
3875
3876static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3877{
3878	struct mvpp2_port *port = netdev_priv(dev);
3879	int err;
3880
3881	/* Remove old parser entry */
3882	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
3883	if (err)
3884		return err;
3885
3886	/* Add new parser entry */
3887	err = mvpp2_prs_mac_da_accept(port, da, true);
3888	if (err)
3889		return err;
3890
3891	/* Set addr in the device */
3892	ether_addr_copy(dev->dev_addr, da);
3893
3894	return 0;
3895}
3896
3897static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
3898{
3899	struct mvpp2 *priv = port->priv;
3900	struct mvpp2_prs_entry pe;
3901	unsigned long pmap;
3902	int index, tid;
3903
3904	for (tid = MVPP2_PE_MAC_RANGE_START;
3905	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
3906		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3907
3908		if (!priv->prs_shadow[tid].valid ||
3909		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3910		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3911			continue;
3912
3913		mvpp2_prs_init_from_hw(priv, &pe, tid);
3914
3915		pmap = mvpp2_prs_tcam_port_map_get(&pe);
3916
3917		/* We only want entries active on this port */
3918		if (!test_bit(port->id, &pmap))
3919			continue;
3920
3921		/* Read mac addr from entry */
3922		for (index = 0; index < ETH_ALEN; index++)
3923			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3924						     &da_mask[index]);
3925
3926		/* Special cases: don't remove the broadcast address or the
3927		 * port's own address
3928		 */
3929		if (is_broadcast_ether_addr(da) ||
3930		    ether_addr_equal(da, port->dev->dev_addr))
3931			continue;
3932
3933		/* Remove entry from TCAM */
3934		mvpp2_prs_mac_da_accept(port, da, false);
3935	}
3936}
3937
3938static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3939{
3940	switch (type) {
3941	case MVPP2_TAG_TYPE_EDSA:
3942		/* Add port to EDSA entries */
3943		mvpp2_prs_dsa_tag_set(priv, port, true,
3944				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3945		mvpp2_prs_dsa_tag_set(priv, port, true,
3946				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3947		/* Remove port from DSA entries */
3948		mvpp2_prs_dsa_tag_set(priv, port, false,
3949				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3950		mvpp2_prs_dsa_tag_set(priv, port, false,
3951				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3952		break;
3953
3954	case MVPP2_TAG_TYPE_DSA:
3955		/* Add port to DSA entries */
3956		mvpp2_prs_dsa_tag_set(priv, port, true,
3957				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3958		mvpp2_prs_dsa_tag_set(priv, port, true,
3959				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3960		/* Remove port from EDSA entries */
3961		mvpp2_prs_dsa_tag_set(priv, port, false,
3962				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3963		mvpp2_prs_dsa_tag_set(priv, port, false,
3964				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3965		break;
3966
3967	case MVPP2_TAG_TYPE_MH:
3968	case MVPP2_TAG_TYPE_NONE:
3969		/* Remove port from EDSA and DSA entries */
3970		mvpp2_prs_dsa_tag_set(priv, port, false,
3971				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3972		mvpp2_prs_dsa_tag_set(priv, port, false,
3973				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3974		mvpp2_prs_dsa_tag_set(priv, port, false,
3975				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3976		mvpp2_prs_dsa_tag_set(priv, port, false,
3977				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3978		break;
3979
3980	default:
3981		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3982			return -EINVAL;
3983	}
3984
3985	return 0;
3986}
3987
3988/* Set prs flow for the port */
3989static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3990{
3991	struct mvpp2_prs_entry pe;
3992	int tid;
3993
3994	memset(&pe, 0, sizeof(pe));
3995
3996	tid = mvpp2_prs_flow_find(port->priv, port->id);
3997
3998	/* No such entry exists */
3999	if (tid < 0) {
4000		/* Go through all entries from last to first */
4001		tid = mvpp2_prs_tcam_first_free(port->priv,
4002						MVPP2_PE_LAST_FREE_TID,
4003						MVPP2_PE_FIRST_FREE_TID);
4004		if (tid < 0)
4005			return tid;
4006
4007		pe.index = tid;
4008
4009		/* Set flow ID */
4010		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
4011		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4012
4013		/* Update shadow table */
4014		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
4015	} else {
4016		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
4017	}
4018
4019	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
4020	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
4021	mvpp2_prs_hw_write(port->priv, &pe);
4022
4023	return 0;
4024}
4025
4026/* Classifier configuration routines */
4027
4028/* Update classification flow table registers */
4029static void mvpp2_cls_flow_write(struct mvpp2 *priv,
4030				 struct mvpp2_cls_flow_entry *fe)
4031{
4032	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
4033	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
4034	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
4035	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
4036}
4037
4038/* Update classification lookup table register */
4039static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
4040				   struct mvpp2_cls_lookup_entry *le)
4041{
4042	u32 val;
4043
4044	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4045	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
4046	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
4047}
4048
4049/* Classifier default initialization */
4050static void mvpp2_cls_init(struct mvpp2 *priv)
4051{
4052	struct mvpp2_cls_lookup_entry le;
4053	struct mvpp2_cls_flow_entry fe;
4054	int index;
4055
4056	/* Enable classifier */
4057	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4058
4059	/* Clear classifier flow table */
4060	memset(&fe.data, 0, sizeof(fe.data));
4061	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4062		fe.index = index;
4063		mvpp2_cls_flow_write(priv, &fe);
4064	}
4065
4066	/* Clear classifier lookup table */
4067	le.data = 0;
4068	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4069		le.lkpid = index;
4070		le.way = 0;
4071		mvpp2_cls_lookup_write(priv, &le);
4072
4073		le.way = 1;
4074		mvpp2_cls_lookup_write(priv, &le);
4075	}
4076}
4077
4078static void mvpp2_cls_port_config(struct mvpp2_port *port)
4079{
4080	struct mvpp2_cls_lookup_entry le;
4081	u32 val;
4082
4083	/* Set way for the port */
4084	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
4085	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
4086	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
4087
4088	/* Pick the entry to be accessed in the lookup ID decoding table
4089	 * according to the way and lkpid.
4090	 */
4091	le.lkpid = port->id;
4092	le.way = 0;
4093	le.data = 0;
4094
4095	/* Set initial CPU queue for receiving packets */
4096	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4097	le.data |= port->first_rxq;
4098
4099	/* Disable classification engines */
4100	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4101
4102	/* Update lookup ID table entry */
4103	mvpp2_cls_lookup_write(port->priv, &le);
4104}
4105
4106/* Set CPU queue number for oversize packets */
4107static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4108{
4109	u32 val;
4110
4111	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
4112		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
4113
4114	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
4115		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
4116
4117	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
4118	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
4119	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
4120}
4121
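/* Allocate an RX buffer fragment: use the page-frag allocator when the
 * fragment fits in a page, otherwise fall back to kmalloc.
 */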
4122static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
4123{
4124	if (likely(pool->frag_size <= PAGE_SIZE))
4125		return netdev_alloc_frag(pool->frag_size);
4126	else
4127		return kmalloc(pool->frag_size, GFP_ATOMIC);
4128}
4129
4130static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
4131{
4132	if (likely(pool->frag_size <= PAGE_SIZE))
4133		skb_free_frag(data);
4134	else
4135		kfree(data);
4136}
4137
4138/* Buffer Manager configuration routines */
4139
4140/* Create pool */
4141static int mvpp2_bm_pool_create(struct platform_device *pdev,
4142				struct mvpp2 *priv,
4143				struct mvpp2_bm_pool *bm_pool, int size)
4144{
4145	u32 val;
4146
4147	/* Number of buffer pointers must be a multiple of 16, as per
4148	 * hardware constraints
4149	 */
4150	if (!IS_ALIGNED(size, 16))
4151		return -EINVAL;
4152
4153	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
4154	 * bytes per buffer pointer
4155	 */
4156	if (priv->hw_version == MVPP21)
4157		bm_pool->size_bytes = 2 * sizeof(u32) * size;
4158	else
4159		bm_pool->size_bytes = 2 * sizeof(u64) * size;
4160
4161	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
4162						&bm_pool->dma_addr,
4163						GFP_KERNEL);
4164	if (!bm_pool->virt_addr)
4165		return -ENOMEM;
4166
4167	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
4168			MVPP2_BM_POOL_PTR_ALIGN)) {
4169		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4170				  bm_pool->virt_addr, bm_pool->dma_addr);
4171		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
4172			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
4173		return -ENOMEM;
4174	}
4175
4176	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
4177		    lower_32_bits(bm_pool->dma_addr));
4178	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
4179
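	/* Start the pool */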
4180	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4181	val |= MVPP2_BM_START_MASK;
4182	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4183
4184	bm_pool->size = size;
4185	bm_pool->pkt_size = 0;
4186	bm_pool->buf_num = 0;
4187
4188	return 0;
4189}
4190
4191/* Set pool buffer size */
4192static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
4193				      struct mvpp2_bm_pool *bm_pool,
4194				      int buf_size)
4195{
4196	u32 val;
4197
4198	bm_pool->buf_size = buf_size;
4199
4200	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
4201	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
4202}
4203
4204static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
4205				    struct mvpp2_bm_pool *bm_pool,
4206				    dma_addr_t *dma_addr,
4207				    phys_addr_t *phys_addr)
4208{
4209	int cpu = get_cpu();
4210
4211	*dma_addr = mvpp2_percpu_read(priv, cpu,
4212				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
4213	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
4214
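	/* On PPv2.2, buffer addresses can exceed 32 bits; the high bits are
	 * read from a dedicated register and merged in below.
	 */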
4215	if (priv->hw_version == MVPP22) {
4216		u32 val;
4217		u32 dma_addr_highbits, phys_addr_highbits;
4218
4219		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
4220		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
4221		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
4222			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
4223
4224		if (sizeof(dma_addr_t) == 8)
4225			*dma_addr |= (u64)dma_addr_highbits << 32;
4226
4227		if (sizeof(phys_addr_t) == 8)
4228			*phys_addr |= (u64)phys_addr_highbits << 32;
4229	}
4230
4231	put_cpu();
4232}
4233
4234/* Free all buffers from the pool */
4235static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
4236			       struct mvpp2_bm_pool *bm_pool, int buf_num)
4237{
4238	int i;
4239
4240	if (buf_num > bm_pool->buf_num) {
4241		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
4242		     bm_pool->id, buf_num);
4243		buf_num = bm_pool->buf_num;
4244	}
4245
4246	for (i = 0; i < buf_num; i++) {
4247		dma_addr_t buf_dma_addr;
4248		phys_addr_t buf_phys_addr;
4249		void *data;
4250
4251		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
4252					&buf_dma_addr, &buf_phys_addr);
4253
4254		dma_unmap_single(dev, buf_dma_addr,
4255				 bm_pool->buf_size, DMA_FROM_DEVICE);
4256
4257		data = (void *)phys_to_virt(buf_phys_addr);
4258		if (!data)
4259			break;
4260
4261		mvpp2_frag_free(bm_pool, data);
4262	}
4263
4264	/* Update BM driver with number of buffers removed from pool */
4265	bm_pool->buf_num -= i;
4266}
4267
4268/* Check number of buffers in BM pool */
4269static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
4270{
4271	int buf_num = 0;
4272
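	/* Buffers are tracked by two counters: pointers currently in the
	 * pool and pointers held in the BPPI.
	 */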
4273	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
4274				    MVPP22_BM_POOL_PTRS_NUM_MASK;
4275	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
4276				    MVPP2_BM_BPPI_PTR_NUM_MASK;
4277
4278	/* HW has one buffer ready which is not reflected in the counters */
4279	if (buf_num)
4280		buf_num += 1;
4281
4282	return buf_num;
4283}
4284
4285/* Cleanup pool */
4286static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4287				 struct mvpp2 *priv,
4288				 struct mvpp2_bm_pool *bm_pool)
4289{
4290	int buf_num;
4291	u32 val;
4292
4293	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4294	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
4295
4296	/* Check buffer counters after free */
4297	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4298	if (buf_num) {
4299		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
4300		     bm_pool->id, bm_pool->buf_num);
4301		return 0;
4302	}
4303
4304	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4305	val |= MVPP2_BM_STOP_MASK;
4306	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4307
4308	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4309			  bm_pool->virt_addr,
4310			  bm_pool->dma_addr);
4311	return 0;
4312}
4313
4314static int mvpp2_bm_pools_init(struct platform_device *pdev,
4315			       struct mvpp2 *priv)
4316{
4317	int i, err, size;
4318	struct mvpp2_bm_pool *bm_pool;
4319
4320	/* Create all pools with maximum size */
4321	size = MVPP2_BM_POOL_SIZE_MAX;
4322	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4323		bm_pool = &priv->bm_pools[i];
4324		bm_pool->id = i;
4325		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4326		if (err)
4327			goto err_unroll_pools;
4328		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4329	}
4330	return 0;
4331
4332err_unroll_pools:
4333	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4334	for (i = i - 1; i >= 0; i--)
4335		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4336	return err;
4337}
4338
4339static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4340{
4341	int i, err;
4342
4343	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4344		/* Mask BM all interrupts */
4345		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4346		/* Clear BM cause register */
4347		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4348	}
4349
4350	/* Allocate and initialize BM pools */
4351	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
4352				      sizeof(*priv->bm_pools), GFP_KERNEL);
4353	if (!priv->bm_pools)
4354		return -ENOMEM;
4355
4356	err = mvpp2_bm_pools_init(pdev, priv);
4357	if (err < 0)
4358		return err;
4359	return 0;
4360}
4361
4362static void mvpp2_setup_bm_pool(void)
4363{
4364	/* Short pool */
4365	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
4366	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4367
4368	/* Long pool */
4369	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
4370	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
4371
4372	/* Jumbo pool */
4373	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
4374	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
4375}
4376
4377/* Attach long pool to rxq */
4378static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4379				    int lrxq, int long_pool)
4380{
4381	u32 val, mask;
4382	int prxq;
4383
4384	/* Get queue physical ID */
4385	prxq = port->rxqs[lrxq]->id;
4386
4387	if (port->priv->hw_version == MVPP21)
4388		mask = MVPP21_RXQ_POOL_LONG_MASK;
4389	else
4390		mask = MVPP22_RXQ_POOL_LONG_MASK;
4391
4392	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4393	val &= ~mask;
4394	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
4395	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4396}
4397
4398/* Attach short pool to rxq */
4399static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4400				     int lrxq, int short_pool)
4401{
4402	u32 val, mask;
4403	int prxq;
4404
4405	/* Get queue physical ID */
4406	prxq = port->rxqs[lrxq]->id;
4407
4408	if (port->priv->hw_version == MVPP21)
4409		mask = MVPP21_RXQ_POOL_SHORT_MASK;
4410	else
4411		mask = MVPP22_RXQ_POOL_SHORT_MASK;
4412
4413	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4414	val &= ~mask;
4415	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
4416	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4417}
4418
4419static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4420			     struct mvpp2_bm_pool *bm_pool,
4421			     dma_addr_t *buf_dma_addr,
4422			     phys_addr_t *buf_phys_addr,
4423			     gfp_t gfp_mask)
4424{
4425	dma_addr_t dma_addr;
4426	void *data;
4427
4428	data = mvpp2_frag_alloc(bm_pool);
4429	if (!data)
4430		return NULL;
4431
4432	dma_addr = dma_map_single(port->dev->dev.parent, data,
4433				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4434				  DMA_FROM_DEVICE);
4435	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
4436		mvpp2_frag_free(bm_pool, data);
4437		return NULL;
4438	}
4439	*buf_dma_addr = dma_addr;
4440	*buf_phys_addr = virt_to_phys(data);
4441
4442	return data;
4443}
4444
4445/* Release buffer to BM */
4446static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
4447				     dma_addr_t buf_dma_addr,
4448				     phys_addr_t buf_phys_addr)
4449{
4450	int cpu = get_cpu();
4451
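	/* On PPv2.2, the high bits of the DMA and physical addresses must be
	 * written to a dedicated register before the low bits are released.
	 */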
4452	if (port->priv->hw_version == MVPP22) {
4453		u32 val = 0;
4454
4455		if (sizeof(dma_addr_t) == 8)
4456			val |= upper_32_bits(buf_dma_addr) &
4457				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4458
4459		if (sizeof(phys_addr_t) == 8)
4460			val |= (upper_32_bits(buf_phys_addr)
4461				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4462				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4463
4464		mvpp2_percpu_write_relaxed(port->priv, cpu,
4465					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
4466	}
4467
4468	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4469	 * returned in the "cookie" field of the RX
4470	 * descriptor. Instead of storing the virtual address, we
4471	 * store the physical address
4472	 */
4473	mvpp2_percpu_write_relaxed(port->priv, cpu,
4474				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4475	mvpp2_percpu_write_relaxed(port->priv, cpu,
4476				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
4477
4478	put_cpu();
4479}
4480
4481/* Allocate buffers for the pool */
4482static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4483			     struct mvpp2_bm_pool *bm_pool, int buf_num)
4484{
4485	int i, buf_size, total_size;
4486	dma_addr_t dma_addr;
4487	phys_addr_t phys_addr;
4488	void *buf;
4489
4490	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4491	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4492
4493	if (buf_num < 0 ||
4494	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
4495		netdev_err(port->dev,
4496			   "cannot allocate %d buffers for pool %d\n",
4497			   buf_num, bm_pool->id);
4498		return 0;
4499	}
4500
4501	for (i = 0; i < buf_num; i++) {
4502		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4503				      &phys_addr, GFP_KERNEL);
4504		if (!buf)
4505			break;
4506
4507		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4508				  phys_addr);
4509	}
4510
4511	/* Update BM driver with number of buffers added to pool */
4512	bm_pool->buf_num += i;
4513
4514	netdev_dbg(port->dev,
4515		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4516		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4517
4518	netdev_dbg(port->dev,
4519		   "pool %d: %d of %d buffers added\n",
4520		   bm_pool->id, i, buf_num);
4521	return i;
4522}
4523
4524/* Notify the driver that the BM pool is being used as a specific type and
4525 * return the pool pointer on success
4526 */
4527static struct mvpp2_bm_pool *
4528mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
4529{
4530	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4531	int num;
4532
4533	if (pool >= MVPP2_BM_POOLS_NUM) {
4534		netdev_err(port->dev, "Invalid pool %d\n", pool);
4535		return NULL;
4536	}
4537
4538	/* Allocate buffers in case BM pool is used as long pool, but packet
4539	 * size doesn't match the MTU or the BM pool hasn't been used yet
4540	 */
4541	if (new_pool->pkt_size == 0) {
4542		int pkts_num;
4543
4544		/* Set default buffer number or free all the buffers in case
4545		 * the pool is not empty
4546		 */
4547		pkts_num = new_pool->buf_num;
4548		if (pkts_num == 0)
4549			pkts_num = mvpp2_pools[pool].buf_num;
4550		else
4551			mvpp2_bm_bufs_free(port->dev->dev.parent,
4552					   port->priv, new_pool, pkts_num);
4553
4554		new_pool->pkt_size = pkt_size;
4555		new_pool->frag_size =
4556			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4557			MVPP2_SKB_SHINFO_SIZE;
4558
4559		/* Allocate buffers for this pool */
4560		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4561		if (num != pkts_num) {
4562			WARN(1, "pool %d: %d of %d allocated\n",
4563			     new_pool->id, num, pkts_num);
4564			return NULL;
4565		}
4566	}
4567
4568	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4569				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4570
4571	return new_pool;
4572}
4573
4574/* Initialize pools for swf */
4575static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4576{
4577	int rxq;
4578	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
4579
4580	/* If port pkt_size is higher than 1518B:
4581	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4582	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4583	 */
4584	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
4585		long_log_pool = MVPP2_BM_JUMBO;
4586		short_log_pool = MVPP2_BM_LONG;
4587	} else {
4588		long_log_pool = MVPP2_BM_LONG;
4589		short_log_pool = MVPP2_BM_SHORT;
4590	}
4591
4592	if (!port->pool_long) {
4593		port->pool_long =
4594			mvpp2_bm_pool_use(port, long_log_pool,
4595					  mvpp2_pools[long_log_pool].pkt_size);
4596		if (!port->pool_long)
4597			return -ENOMEM;
4598
4599		port->pool_long->port_map |= BIT(port->id);
4600
4601		for (rxq = 0; rxq < port->nrxqs; rxq++)
4602			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4603	}
4604
4605	if (!port->pool_short) {
4606		port->pool_short =
4607			mvpp2_bm_pool_use(port, short_log_pool,
4608					  mvpp2_pools[short_log_pool].pkt_size);
4609		if (!port->pool_short)
4610			return -ENOMEM;
4611
4612		port->pool_short->port_map |= BIT(port->id);
4613
4614		for (rxq = 0; rxq < port->nrxqs; rxq++)
4615			mvpp2_rxq_short_pool_set(port, rxq,
4616						 port->pool_short->id);
4617	}
4618
4619	return 0;
4620}
4621
4622static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4623{
4624	struct mvpp2_port *port = netdev_priv(dev);
4625	enum mvpp2_bm_pool_log_num new_long_pool;
4626	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4627
4628	/* If port MTU is higher than 1518B:
4629	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4630	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4631	 */
4632	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
4633		new_long_pool = MVPP2_BM_JUMBO;
4634	else
4635		new_long_pool = MVPP2_BM_LONG;
4636
4637	if (new_long_pool != port->pool_long->id) {
4638		/* Remove port from old short & long pool */
4639		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
4640						    port->pool_long->pkt_size);
4641		port->pool_long->port_map &= ~BIT(port->id);
4642		port->pool_long = NULL;
4643
4644		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
4645						     port->pool_short->pkt_size);
4646		port->pool_short->port_map &= ~BIT(port->id);
4647		port->pool_short = NULL;
4648
4649		port->pkt_size = pkt_size;
4650
4651		/* Add port to new short & long pool */
4652		mvpp2_swf_bm_pool_init(port);
4653
4654		/* Update L4 checksum offload when jumbo is enabled/disabled on the port */
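		/* Likely only port 0 has a TX FIFO large enough to keep
		 * checksum offload together with jumbo frames; the other
		 * ports fall back to software checksumming.
		 */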
4655		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
4656			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4657			dev->hw_features &= ~(NETIF_F_IP_CSUM |
4658					      NETIF_F_IPV6_CSUM);
4659		} else {
4660			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4661			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4662		}
4663	}
4664
4665	dev->mtu = mtu;
4666	dev->wanted_features = dev->features;
4667
4668	netdev_update_features(dev);
4669	return 0;
4670}
4671
4672static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4673{
4674	int i, sw_thread_mask = 0;
4675
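	/* Collect the software thread masks of all this port's queue vectors
	 * and enable their interrupts in a single write.
	 */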
4676	for (i = 0; i < port->nqvecs; i++)
4677		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4678
4679	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4680		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
4681}
4682
4683static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4684{
4685	int i, sw_thread_mask = 0;
4686
4687	for (i = 0; i < port->nqvecs; i++)
4688		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4689
4690	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4691		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4692}
4693
4694static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4695{
4696	struct mvpp2_port *port = qvec->port;
4697
4698	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4699		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4700}
4701
4702static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4703{
4704	struct mvpp2_port *port = qvec->port;
4705
4706	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4707		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
4708}
4709
4710/* Mask the current CPU's Rx/Tx interrupts.
4711 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4712 * using smp_processor_id() is OK.
4713 */
4714static void mvpp2_interrupts_mask(void *arg)
4715{
4716	struct mvpp2_port *port = arg;
4717
4718	mvpp2_percpu_write(port->priv, smp_processor_id(),
4719			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4720}
4721
4722/* Unmask the current CPU's Rx/Tx interrupts.
4723 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4724 * using smp_processor_id() is OK.
4725 */
4726static void mvpp2_interrupts_unmask(void *arg)
4727{
4728	struct mvpp2_port *port = arg;
4729	u32 val;
4730
4731	val = MVPP2_CAUSE_MISC_SUM_MASK |
4732		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4733	if (port->has_tx_irqs)
4734		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4735
4736	mvpp2_percpu_write(port->priv, smp_processor_id(),
4737			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4738}
4739
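/* Mask or unmask the RX interrupts of this port's shared queue vectors
 * (PPv2.2 only).
 */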
4740static void
4741mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4742{
4743	u32 val;
4744	int i;
4745
4746	if (port->priv->hw_version != MVPP22)
4747		return;
4748
4749	if (mask)
4750		val = 0;
4751	else
4752		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4753
4754	for (i = 0; i < port->nqvecs; i++) {
4755		struct mvpp2_queue_vector *v = port->qvecs + i;
4756
4757		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4758			continue;
4759
4760		mvpp2_percpu_write(port->priv, v->sw_thread_id,
4761				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4762	}
4763}
4764
4765/* Port configuration routines */
4766
4767static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4768{
4769	struct mvpp2 *priv = port->priv;
4770	u32 val;
4771
4772	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4773	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4774	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4775
4776	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4777	if (port->gop_id == 2)
4778		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4779	else if (port->gop_id == 3)
4780		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4781	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4782}
4783
4784static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4785{
4786	struct mvpp2 *priv = port->priv;
4787	u32 val;
4788
4789	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4790	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4791	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4792	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4793
4794	if (port->gop_id > 1) {
4795		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4796		if (port->gop_id == 2)
4797			val &= ~GENCONF_CTRL0_PORT0_RGMII;
4798		else if (port->gop_id == 3)
4799			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4800		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4801	}
4802}
4803
4804static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4805{
4806	struct mvpp2 *priv = port->priv;
4807	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4808	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4809	u32 val;
4810
4811	/* XPCS */
4812	val = readl(xpcs + MVPP22_XPCS_CFG0);
4813	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4814		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4815	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4816	writel(val, xpcs + MVPP22_XPCS_CFG0);
4817
4818	/* MPCS */
4819	val = readl(mpcs + MVPP22_MPCS_CTRL);
4820	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4821	writel(val, mpcs + MVPP22_MPCS_CTRL);
4822
4823	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4824	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4825		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4826	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4827	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4828
4829	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4830	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4831	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4832}
4833
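/* Configure the GoP (group of ports) glue logic in the system controller
 * according to the port's PHY interface mode.
 */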
4834static int mvpp22_gop_init(struct mvpp2_port *port)
4835{
4836	struct mvpp2 *priv = port->priv;
4837	u32 val;
4838
4839	if (!priv->sysctrl_base)
4840		return 0;
4841
4842	switch (port->phy_interface) {
4843	case PHY_INTERFACE_MODE_RGMII:
4844	case PHY_INTERFACE_MODE_RGMII_ID:
4845	case PHY_INTERFACE_MODE_RGMII_RXID:
4846	case PHY_INTERFACE_MODE_RGMII_TXID:
4847		if (port->gop_id == 0)
4848			goto invalid_conf;
4849		mvpp22_gop_init_rgmii(port);
4850		break;
4851	case PHY_INTERFACE_MODE_SGMII:
4852		mvpp22_gop_init_sgmii(port);
4853		break;
4854	case PHY_INTERFACE_MODE_10GKR:
4855		if (port->gop_id != 0)
4856			goto invalid_conf;
4857		mvpp22_gop_init_10gkr(port);
4858		break;
4859	default:
4860		goto unsupported_conf;
4861	}
4862
4863	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4864	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4865	       GENCONF_PORT_CTRL1_EN(port->gop_id);
4866	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4867
4868	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4869	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4870	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4871
4872	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4873	val |= GENCONF_SOFT_RESET1_GOP;
4874	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4875
4876unsupported_conf:
4877	return 0;
4878
4879invalid_conf:
4880	netdev_err(port->dev, "Invalid port configuration\n");
4881	return -EINVAL;
4882}
4883
4884static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4885{
4886	u32 val;
4887
4888	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4889	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4890		/* Enable the GMAC link status irq for this port */
4891		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4892		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4893		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4894	}
4895
4896	if (port->gop_id == 0) {
4897		/* Enable the XLG/GIG irqs for this port */
4898		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4899		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4900			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4901		else
4902			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4903		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4904	}
4905}
4906
4907static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4908{
4909	u32 val;
4910
4911	if (port->gop_id == 0) {
4912		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4913		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4914		         MVPP22_XLG_EXT_INT_MASK_GIG);
4915		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4916	}
4917
4918	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4919	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4920		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4921		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4922		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4923	}
4924}
4925
4926static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4927{
4928	u32 val;
4929
4930	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4931	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4932		val = readl(port->base + MVPP22_GMAC_INT_MASK);
4933		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4934		writel(val, port->base + MVPP22_GMAC_INT_MASK);
4935	}
4936
4937	if (port->gop_id == 0) {
4938		val = readl(port->base + MVPP22_XLG_INT_MASK);
4939		val |= MVPP22_XLG_INT_MASK_LINK;
4940		writel(val, port->base + MVPP22_XLG_INT_MASK);
4941	}
4942
4943	mvpp22_gop_unmask_irq(port);
4944}
4945
4946static int mvpp22_comphy_init(struct mvpp2_port *port)
4947{
4948	enum phy_mode mode;
4949	int ret;
4950
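	/* Ports without a common PHY (comphy) need no serdes configuration */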
4951	if (!port->comphy)
4952		return 0;
4953
4954	switch (port->phy_interface) {
4955	case PHY_INTERFACE_MODE_SGMII:
4956		mode = PHY_MODE_SGMII;
4957		break;
4958	case PHY_INTERFACE_MODE_10GKR:
4959		mode = PHY_MODE_10GKR;
4960		break;
4961	default:
4962		return -EINVAL;
4963	}
4964
4965	ret = phy_set_mode(port->comphy, mode);
4966	if (ret)
4967		return ret;
4968
4969	return phy_power_on(port->comphy);
4970}
4971
4972static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4973{
4974	u32 val;
4975
4976	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4977		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4978		val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4979		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4980		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4981		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4982	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
4983		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4984		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4985		       MVPP22_CTRL4_SYNC_BYPASS_DIS |
4986		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4987		val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4988		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4989	}
4990
4991	/* The port is connected to a copper PHY */
4992	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4993	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4994	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4995
4996	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4997	val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4998	       MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4999	       MVPP2_GMAC_AN_DUPLEX_EN;
5000	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5001		val |= MVPP2_GMAC_IN_BAND_AUTONEG;
5002	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5003}
5004
5005static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
5006{
5007	u32 val;
5008
5009	/* Force link down */
5010	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5011	val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5012	val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5013	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5014
5015	/* Set the GMAC in a reset state */
5016	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5017	val |= MVPP2_GMAC_PORT_RESET_MASK;
5018	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5019
5020	/* Configure the PCS and in-band AN */
5021	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5022	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5023		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
5024	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
5025		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
5026	}
5027	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5028
5029	mvpp2_port_mii_gmac_configure_mode(port);
5030
5031	/* Unset the GMAC reset state */
5032	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5033	val &= ~MVPP2_GMAC_PORT_RESET_MASK;
5034	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5035
5036	/* Stop forcing link down */
5037	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5038	val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5039	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5040}
5041
5042static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
5043{
5044	u32 val;
5045
5046	if (port->gop_id != 0)
5047		return;
5048
5049	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5050	val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5051	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5052
5053	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5054	val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5055	val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5056	writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5057}
5058
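/* On PPv2.2, select the 10G (XLG) or 1G (GMAC) MAC mode for GOP port 0
 * according to the port's PHY interface mode.
 */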
5059static void mvpp22_port_mii_set(struct mvpp2_port *port)
5060{
5061	u32 val;
5062
5063	/* Only GOP port 0 has an XLG MAC */
5064	if (port->gop_id == 0) {
5065		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5066		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
5067
5068		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5069		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5070			val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5071		else
5072			val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5073
5074		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5075	}
5076}
5077
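/* Configure the MAC (GMAC or XLG) matching the port's PHY interface mode */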
5078static void mvpp2_port_mii_set(struct mvpp2_port *port)
5079{
5080	if (port->priv->hw_version == MVPP22)
5081		mvpp22_port_mii_set(port);
5082
5083	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
5084	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5085		mvpp2_port_mii_gmac_configure(port);
5086	else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5087		mvpp2_port_mii_xlg_configure(port);
5088}
5089
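/* Advertise flow control support during autonegotiation */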
5090static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5091{
5092	u32 val;
5093
5094	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5095	val |= MVPP2_GMAC_FC_ADV_EN;
5096	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5097}
5098
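/* Enable the port's MAC and its MIB counters: the XLG MAC for 10G interfaces
 * on GOP port 0, the GMAC otherwise.
 */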
5099static void mvpp2_port_enable(struct mvpp2_port *port)
5100{
5101	u32 val;
5102
5103	/* Only GOP port 0 has an XLG MAC */
5104	if (port->gop_id == 0 &&
5105	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5106	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5107		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5108		val |= MVPP22_XLG_CTRL0_PORT_EN |
5109		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5110		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5111		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5112	} else {
5113		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5114		val |= MVPP2_GMAC_PORT_EN_MASK;
5115		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5116		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5117	}
5118}
5119
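/* Disable the port's MAC (XLG or GMAC, mirroring mvpp2_port_enable()) */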
5120static void mvpp2_port_disable(struct mvpp2_port *port)
5121{
5122	u32 val;
5123
5124	/* Only GOP port 0 has an XLG MAC */
5125	if (port->gop_id == 0 &&
5126	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5127	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5128		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5129		val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5130			 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5131		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5132	} else {
5133		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5134		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5135		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5136	}
5137}
5138
5139/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5140static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5141{
5142	u32 val;
5143
5144	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5145		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5146	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5147}
5148
5149/* Configure loopback port */
5150static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5151{
5152	u32 val;
5153
5154	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5155
5156	if (port->speed == 1000)
5157		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5158	else
5159		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5160
5161	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5162		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5163	else
5164		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5165
5166	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5167}
5168
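/* Describes one MIB counter exposed through ethtool: its register offset,
 * the name reported to userspace, and whether it spans two 32-bit registers.
 */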
5169struct mvpp2_ethtool_counter {
5170	unsigned int offset;
5171	const char string[ETH_GSTRING_LEN];
5172	bool reg_is_64b;
5173};
5174
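/* Read a MIB counter, combining the two 32-bit halves of 64-bit counters */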
5175static u64 mvpp2_read_count(struct mvpp2_port *port,
5176			    const struct mvpp2_ethtool_counter *counter)
5177{
5178	u64 val;
5179
5180	val = readl(port->stats_base + counter->offset);
5181	if (counter->reg_is_64b)
5182		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5183
5184	return val;
5185}
5186
5187/* Software statistics and hardware statistics are, by design, incremented at
5188 * different moments in the packet processing chain. It is therefore likely
5189 * that incoming packets are dropped after being counted by hardware but
5190 * before reaching the software statistics (most probably multicast packets),
5191 * and, in the opposite direction, that during transmission FCS bytes are
5192 * added in between and TSO skbs are split with extra header bytes. Hence,
5193 * statistics gathered from userspace with ifconfig (software) and ethtool
5194 * (hardware) cannot be compared.
5195 */
5196static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5197	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5198	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5199	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5200	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5201	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5202	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5203	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5204	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5205	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5206	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5207	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5208	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5209	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5210	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5211	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5212	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5213	{ MVPP2_MIB_FC_SENT, "fc_sent" },
5214	{ MVPP2_MIB_FC_RCVD, "fc_received" },
5215	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5216	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5217	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5218	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5219	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5220	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5221	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5222	{ MVPP2_MIB_COLLISION, "collision" },
5223	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
5224};
5225
5226static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5227				      u8 *data)
5228{
5229	if (sset == ETH_SS_STATS) {
5230		int i;
5231
5232		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5233			memcpy(data + i * ETH_GSTRING_LEN,
5234			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5235	}
5236}
5237
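/* Delayed work gathering the hardware MIB counters into port->ethtool_stats,
 * then re-arming itself for the next collection.
 */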
5238static void mvpp2_gather_hw_statistics(struct work_struct *work)
5239{
5240	struct delayed_work *del_work = to_delayed_work(work);
5241	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5242					       stats_work);
5243	u64 *pstats;
5244	int i;
5245
5246	mutex_lock(&port->gather_stats_lock);
5247
5248	pstats = port->ethtool_stats;
5249	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5250		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5251
5252	/* There is no need to read the counters again right after this function
5253	 * if it was called asynchronously by the user (i.e. via ethtool).
5254	 */
5255	cancel_delayed_work(&port->stats_work);
5256	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
5257			   MVPP2_MIB_COUNTERS_STATS_DELAY);
5258
5259	mutex_unlock(&port->gather_stats_lock);
5260}
5261
5262static void mvpp2_ethtool_get_stats(struct net_device *dev,
5263				    struct ethtool_stats *stats, u64 *data)
5264{
5265	struct mvpp2_port *port = netdev_priv(dev);
5266
5267	/* Update statistics for the given port, then take the lock to avoid
5268	 * concurrent accesses to the ethtool_stats structure while it is copied.
5269	 */
5270	mvpp2_gather_hw_statistics(&port->stats_work.work);
5271
5272	mutex_lock(&port->gather_stats_lock);
5273	memcpy(data, port->ethtool_stats,
5274	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
5275	mutex_unlock(&port->gather_stats_lock);
5276}
5277
5278static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5279{
5280	if (sset == ETH_SS_STATS)
5281		return ARRAY_SIZE(mvpp2_ethtool_regs);
5282
5283	return -EOPNOTSUPP;
5284}
5285
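/* Read all MIB counters to clear the hardware statistics, then clear the
 * GMAC port reset bit and wait until the hardware reports it as cleared.
 */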
5286static void mvpp2_port_reset(struct mvpp2_port *port)
5287{
5288	u32 val;
5289	unsigned int i;
5290
5291	/* Read the GOP statistics to reset the hardware counters */
5292	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5293		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5294
5295	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5296		    ~MVPP2_GMAC_PORT_RESET_MASK;
5297	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5298
5299	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5300	       MVPP2_GMAC_PORT_RESET_MASK)
5301		continue;
5302}
5303
5304/* Change maximum receive size of the port */
5305static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5306{
5307	u32 val;
5308
5309	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5310	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5311	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5312		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5313	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5314}
5315
5316/* Change maximum receive size of the port */
5317static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5318{
5319	u32 val;
5320
5321	val =  readl(port->base + MVPP22_XLG_CTRL1_REG);
5322	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5323	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5324	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
5325	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5326}
5327
5328/* Set defaults to the MVPP2 port */
5329static void mvpp2_defaults_set(struct mvpp2_port *port)
5330{
5331	int tx_port_num, val, queue, ptxq, lrxq;
5332
5333	if (port->priv->hw_version == MVPP21) {
5334		/* Configure port to loopback if needed */
5335		if (port->flags & MVPP2_F_LOOPBACK)
5336			mvpp2_port_loopback_set(port);
5337
5338		/* Update TX FIFO MIN Threshold */
5339		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5340		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5341		/* Min. TX threshold must be less than minimal packet length */
5342		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5343		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5344	}
5345
5346	/* Disable Legacy WRR, Disable EJP, Release from reset */
5347	tx_port_num = mvpp2_egress_port(port);
5348	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5349		    tx_port_num);
5350	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5351
5352	/* Close bandwidth for all queues */
5353	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5354		ptxq = mvpp2_txq_phys(port->id, queue);
5355		mvpp2_write(port->priv,
5356			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5357	}
5358
5359	/* Set refill period to 1 usec, refill tokens
5360	 * and bucket size to maximum
5361	 */
5362	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5363		    port->priv->tclk / USEC_PER_SEC);
5364	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5365	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5366	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5367	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5368	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5369	val = MVPP2_TXP_TOKEN_SIZE_MAX;
5370	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5371
5372	/* Set MaximumLowLatencyPacketSize value to 256 */
5373	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5374		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5375		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5376
5377	/* Enable Rx cache snoop */
5378	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5379		queue = port->rxqs[lrxq]->id;
5380		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5381		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5382			   MVPP2_SNOOP_BUF_HDR_MASK;
5383		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5384	}
5385
5386	/* By default, mask all interrupts on all present CPUs */
5387	mvpp2_interrupts_disable(port);
5388}
5389
5390/* Enable/disable receiving packets */
5391static void mvpp2_ingress_enable(struct mvpp2_port *port)
5392{
5393	u32 val;
5394	int lrxq, queue;
5395
5396	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5397		queue = port->rxqs[lrxq]->id;
5398		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5399		val &= ~MVPP2_RXQ_DISABLE_MASK;
5400		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5401	}
5402}
5403
5404static void mvpp2_ingress_disable(struct mvpp2_port *port)
5405{
5406	u32 val;
5407	int lrxq, queue;
5408
5409	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5410		queue = port->rxqs[lrxq]->id;
5411		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5412		val |= MVPP2_RXQ_DISABLE_MASK;
5413		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5414	}
5415}
5416
5417/* Enable transmit via physical egress queue
5418 * - HW starts taking descriptors from DRAM
5419 */
5420static void mvpp2_egress_enable(struct mvpp2_port *port)
5421{
5422	u32 qmap;
5423	int queue;
5424	int tx_port_num = mvpp2_egress_port(port);
5425
5426	/* Enable all initialized TXs. */
5427	qmap = 0;
5428	for (queue = 0; queue < port->ntxqs; queue++) {
5429		struct mvpp2_tx_queue *txq = port->txqs[queue];
5430
5431		if (txq->descs)
5432			qmap |= (1 << queue);
5433	}
5434
5435	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5436	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5437}
5438
5439/* Disable transmit via physical egress queue
5440 * - HW doesn't take descriptors from DRAM
5441 */
5442static void mvpp2_egress_disable(struct mvpp2_port *port)
5443{
5444	u32 reg_data;
5445	int delay;
5446	int tx_port_num = mvpp2_egress_port(port);
5447
5448	/* Issue stop command for active channels only */
5449	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5450	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5451		    MVPP2_TXP_SCHED_ENQ_MASK;
5452	if (reg_data != 0)
5453		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5454			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5455
5456	/* Wait for all Tx activity to terminate. */
5457	delay = 0;
5458	do {
5459		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5460			netdev_warn(port->dev,
5461				    "Tx stop timed out, status=0x%08x\n",
5462				    reg_data);
5463			break;
5464		}
5465		mdelay(1);
5466		delay++;
5467
5468		/* Check the port TX Command register to verify that all
5469		 * Tx queues have stopped
5470		 */
5471		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5472	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5473}
5474
5475/* Rx descriptors helper methods */
5476
5477/* Get number of Rx descriptors occupied by received packets */
5478static inline int
5479mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5480{
5481	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5482
5483	return val & MVPP2_RXQ_OCCUPIED_MASK;
5484}
5485
5486/* Update Rx queue status with the number of occupied and available
5487 * Rx descriptor slots.
5488 */
5489static inline void
5490mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5491			int used_count, int free_count)
5492{
5493	/* Decrement the number of used descriptors and increment the
5494	 * number of free descriptors.
5495	 */
5496	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5497
5498	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5499}
5500
5501/* Get pointer to next RX descriptor to be processed by SW */
5502static inline struct mvpp2_rx_desc *
5503mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5504{
5505	int rx_desc = rxq->next_desc_to_proc;
5506
5507	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5508	prefetch(rxq->descs + rxq->next_desc_to_proc);
5509	return rxq->descs + rx_desc;
5510}
5511
5512/* Set rx queue offset */
5513static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5514				 int prxq, int offset)
5515{
5516	u32 val;
5517
5518	/* Convert offset from bytes to units of 32 bytes */
5519	offset = offset >> 5;
5520
5521	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5522	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5523
5524	/* Offset is in units of 32 bytes */
5525	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5526		    MVPP2_RXQ_PACKET_OFFSET_MASK);
5527
5528	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5529}
5530
5531/* Tx descriptors helper methods */
5532
5533/* Get pointer to next Tx descriptor to be processed (send) by HW */
5534static struct mvpp2_tx_desc *
5535mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5536{
5537	int tx_desc = txq->next_desc_to_proc;
5538
5539	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5540	return txq->descs + tx_desc;
5541}
5542
5543/* Update HW with number of aggregated Tx descriptors to be sent
5544 *
5545 * Called only from mvpp2_tx(), so migration is disabled, using
5546 * smp_processor_id() is OK.
5547 */
5548static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5549{
5550	/* aggregated access - relevant TXQ number is written in TX desc */
5551	mvpp2_percpu_write(port->priv, smp_processor_id(),
5552			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
5553}
5554
5555
5556/* Check if there are enough free descriptors in aggregated txq.
5557 * If not, update the number of occupied descriptors and repeat the check.
5558 *
5559 * Called only from mvpp2_tx(), so migration is disabled, using
5560 * smp_processor_id() is OK.
5561 */
5562static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5563				     struct mvpp2_tx_queue *aggr_txq, int num)
5564{
5565	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
5566		/* Update number of occupied aggregated Tx descriptors */
5567		int cpu = smp_processor_id();
5568		u32 val = mvpp2_read_relaxed(priv,
5569					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5570
5571		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5572	}
5573
5574	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
5575		return -ENOMEM;
5576
5577	return 0;
5578}
5579
5580/* Reserved Tx descriptors allocation request
5581 *
5582 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5583 * only by mvpp2_tx(), so migration is disabled, using
5584 * smp_processor_id() is OK.
5585 */
5586static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5587					 struct mvpp2_tx_queue *txq, int num)
5588{
5589	u32 val;
5590	int cpu = smp_processor_id();
5591
5592	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
5593	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
5594
5595	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
5596
5597	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5598}
5599
5600/* Check if there are enough reserved descriptors for transmission.
5601 * If not, request chunk of reserved descriptors and check again.
5602 */
5603static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5604					    struct mvpp2_tx_queue *txq,
5605					    struct mvpp2_txq_pcpu *txq_pcpu,
5606					    int num)
5607{
5608	int req, cpu, desc_count;
5609
5610	if (txq_pcpu->reserved_num >= num)
5611		return 0;
5612
5613	/* Not enough descriptors reserved! Update the reserved descriptor
5614	 * count and check again.
5615	 */
5616
5617	desc_count = 0;
5618	/* Compute total of used descriptors */
5619	for_each_present_cpu(cpu) {
5620		struct mvpp2_txq_pcpu *txq_pcpu_aux;
5621
5622		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5623		desc_count += txq_pcpu_aux->count;
5624		desc_count += txq_pcpu_aux->reserved_num;
5625	}
5626
5627	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5628	desc_count += req;
5629
5630	if (desc_count >
5631	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5632		return -ENOMEM;
5633
5634	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5635
5636	/* OK, the descriptor count has been updated: check again. */
5637	if (txq_pcpu->reserved_num < num)
5638		return -ENOMEM;
5639	return 0;
5640}
5641
5642/* Release the last allocated Tx descriptor. Useful to handle DMA
5643 * mapping failures in the Tx path.
5644 */
5645static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5646{
5647	if (txq->next_desc_to_proc == 0)
5648		txq->next_desc_to_proc = txq->last_desc - 1;
5649	else
5650		txq->next_desc_to_proc--;
5651}
5652
5653/* Set Tx descriptors fields relevant for CSUM calculation */
5654static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5655			       int ip_hdr_len, int l4_proto)
5656{
5657	u32 command;
5658
5659	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5660	 * G_L4_chk, L4_type required only for checksum calculation
5661	 */
5662	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5663	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5664	command |= MVPP2_TXD_IP_CSUM_DISABLE;
5665
5666	if (l3_proto == swab16(ETH_P_IP)) {
5667		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
5668		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
5669	} else {
5670		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
5671	}
5672
5673	if (l4_proto == IPPROTO_TCP) {
5674		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
5675		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
5676	} else if (l4_proto == IPPROTO_UDP) {
5677		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
5678		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
5679	} else {
5680		command |= MVPP2_TXD_L4_CSUM_NOT;
5681	}
5682
5683	return command;
5684}
5685
5686/* Get number of sent descriptors and decrement counter.
5687 * The number of sent descriptors is returned.
5688 * Per-CPU access
5689 *
5690 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5691 * (migration disabled) and from the TX completion tasklet (migration
5692 * disabled) so using smp_processor_id() is OK.
5693 */
5694static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5695					   struct mvpp2_tx_queue *txq)
5696{
5697	u32 val;
5698
5699	/* Reading status reg resets transmitted descriptor counter */
5700	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
5701					MVPP2_TXQ_SENT_REG(txq->id));
5702
5703	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5704		MVPP2_TRANSMITTED_COUNT_OFFSET;
5705}
5706
5707/* Called through on_each_cpu(), so runs on all CPUs, with migration
5708 * disabled, therefore using smp_processor_id() is OK.
5709 */
5710static void mvpp2_txq_sent_counter_clear(void *arg)
5711{
5712	struct mvpp2_port *port = arg;
5713	int queue;
5714
5715	for (queue = 0; queue < port->ntxqs; queue++) {
5716		int id = port->txqs[queue]->id;
5717
5718		mvpp2_percpu_read(port->priv, smp_processor_id(),
5719				  MVPP2_TXQ_SENT_REG(id));
5720	}
5721}
5722
5723/* Set max sizes for Tx queues */
5724static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5725{
5726	u32	val, size, mtu;
5727	int	txq, tx_port_num;
5728
5729	mtu = port->pkt_size * 8;
5730	if (mtu > MVPP2_TXP_MTU_MAX)
5731		mtu = MVPP2_TXP_MTU_MAX;
5732
5733	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5734	mtu = 3 * mtu;
5735
5736	/* Indirect access to registers */
5737	tx_port_num = mvpp2_egress_port(port);
5738	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5739
5740	/* Set MTU */
5741	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5742	val &= ~MVPP2_TXP_MTU_MAX;
5743	val |= mtu;
5744	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5745
5746	/* TXP token size and all TXQ token sizes must be larger than the MTU */
5747	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5748	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5749	if (size < mtu) {
5750		size = mtu;
5751		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5752		val |= size;
5753		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5754	}
5755
5756	for (txq = 0; txq < port->ntxqs; txq++) {
5757		val = mvpp2_read(port->priv,
5758				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5759		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5760
5761		if (size < mtu) {
5762			size = mtu;
5763			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5764			val |= size;
5765			mvpp2_write(port->priv,
5766				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5767				    val);
5768		}
5769	}
5770}
5771
5772/* Set the number of packets that will be received before Rx interrupt
5773 * will be generated by HW.
5774 */
5775static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
5776				   struct mvpp2_rx_queue *rxq)
5777{
5778	int cpu = get_cpu();
5779
5780	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5781		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
5782
5783	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5784	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5785			   rxq->pkts_coal);
5786
5787	put_cpu();
5788}
5789
5790/* For some reason in the LSP this is done on each CPU. Why ? */
5791static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5792				   struct mvpp2_tx_queue *txq)
5793{
5794	int cpu = get_cpu();
5795	u32 val;
5796
5797	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5798		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5799
5800	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5801	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5802	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
5803
5804	put_cpu();
5805}
5806
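/* Helpers converting between microseconds and clock cycles for the
 * interrupt coalescing registers, saturating at U32_MAX.
 */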
5807static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5808{
5809	u64 tmp = (u64)clk_hz * usec;
5810
5811	do_div(tmp, USEC_PER_SEC);
5812
5813	return tmp > U32_MAX ? U32_MAX : tmp;
5814}
5815
5816static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5817{
5818	u64 tmp = (u64)cycles * USEC_PER_SEC;
5819
5820	do_div(tmp, clk_hz);
5821
5822	return tmp > U32_MAX ? U32_MAX : tmp;
5823}
5824
5825/* Set the time delay in usec before Rx interrupt */
5826static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
5827				   struct mvpp2_rx_queue *rxq)
5828{
5829	unsigned long freq = port->priv->tclk;
5830	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5831
5832	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5833		rxq->time_coal =
5834			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5835
5836		/* re-evaluate to get actual register value */
5837		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5838	}
5839
5840	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
5841}
5842
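/* Set the time delay in usec before a Tx-done interrupt */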
5843static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5844{
5845	unsigned long freq = port->priv->tclk;
5846	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5847
5848	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5849		port->tx_time_coal =
5850			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5851
5852		/* re-evaluate to get actual register value */
5853		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5854	}
5855
5856	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5857}
5858
5859/* Free Tx queue skbuffs */
5860static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5861				struct mvpp2_tx_queue *txq,
5862				struct mvpp2_txq_pcpu *txq_pcpu, int num)
5863{
5864	int i;
5865
5866	for (i = 0; i < num; i++) {
5867		struct mvpp2_txq_pcpu_buf *tx_buf =
5868			txq_pcpu->buffs + txq_pcpu->txq_get_index;
5869
5870		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5871			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5872					 tx_buf->size, DMA_TO_DEVICE);
5873		if (tx_buf->skb)
5874			dev_kfree_skb_any(tx_buf->skb);
5875
5876		mvpp2_txq_inc_get(txq_pcpu);
5877	}
5878}
5879
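/* Get the Rx queue matching the highest bit set in the interrupt cause */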
5880static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5881							u32 cause)
5882{
5883	int queue = fls(cause) - 1;
5884
5885	return port->rxqs[queue];
5886}
5887
5888static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5889							u32 cause)
5890{
5891	int queue = fls(cause) - 1;
5892
5893	return port->txqs[queue];
5894}
5895
5896/* Handle end of transmission */
5897static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5898			   struct mvpp2_txq_pcpu *txq_pcpu)
5899{
5900	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5901	int tx_done;
5902
5903	if (txq_pcpu->cpu != smp_processor_id())
5904		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5905
5906	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5907	if (!tx_done)
5908		return;
5909	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5910
5911	txq_pcpu->count -= tx_done;
5912
5913	if (netif_tx_queue_stopped(nq))
5914		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
5915			netif_tx_wake_queue(nq);
5916}
5917
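/* Process Tx completion on every queue flagged in the cause bitmap and
 * return the number of descriptors still pending.
 */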
5918static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5919				  int cpu)
5920{
5921	struct mvpp2_tx_queue *txq;
5922	struct mvpp2_txq_pcpu *txq_pcpu;
5923	unsigned int tx_todo = 0;
5924
5925	while (cause) {
5926		txq = mvpp2_get_tx_queue(port, cause);
5927		if (!txq)
5928			break;
5929
5930		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5931
5932		if (txq_pcpu->count) {
5933			mvpp2_txq_done(port, txq, txq_pcpu);
5934			tx_todo += txq_pcpu->count;
5935		}
5936
5937		cause &= ~(1 << txq->log_id);
5938	}
5939	return tx_todo;
5940}
5941
5942/* Rx/Tx queue initialization/cleanup methods */
5943
5944/* Allocate and initialize descriptors for aggr TXQ */
5945static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5946			       struct mvpp2_tx_queue *aggr_txq, int cpu,
5947			       struct mvpp2 *priv)
5948{
5949	u32 txq_dma;
5950
5951	/* Allocate memory for TX descriptors */
5952	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
5953				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5954				&aggr_txq->descs_dma, GFP_KERNEL);
5955	if (!aggr_txq->descs)
5956		return -ENOMEM;
5957
5958	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
5959
5960	/* Aggr TXQ no-reset workaround: resync the SW index with the HW index */
5961	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5962						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5963
5964	/* Set the Tx descriptors queue starting address (indirect
5965	 * access)
5966	 */
5967	if (priv->hw_version == MVPP21)
5968		txq_dma = aggr_txq->descs_dma;
5969	else
5970		txq_dma = aggr_txq->descs_dma >>
5971			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5972
5973	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
5974	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5975		    MVPP2_AGGR_TXQ_SIZE);
5976
5977	return 0;
5978}
5979
5980/* Create a specified Rx queue */
5981static int mvpp2_rxq_init(struct mvpp2_port *port,
5982			  struct mvpp2_rx_queue *rxq)
5983
5984{
5985	u32 rxq_dma;
5986	int cpu;
5987
5988	rxq->size = port->rx_ring_size;
5989
5990	/* Allocate memory for RX descriptors */
5991	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5992					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5993					&rxq->descs_dma, GFP_KERNEL);
5994	if (!rxq->descs)
5995		return -ENOMEM;
5996
5997	rxq->last_desc = rxq->size - 1;
5998
5999	/* Zero occupied and non-occupied counters - direct access */
6000	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
6001
6002	/* Set Rx descriptors queue starting address - indirect access */
6003	cpu = get_cpu();
6004	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
6005	if (port->priv->hw_version == MVPP21)
6006		rxq_dma = rxq->descs_dma;
6007	else
6008		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
6009	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
6010	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
6011	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
6012	put_cpu();
6013
6014	/* Set Offset */
6015	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
6016
6017	/* Set coalescing pkts and time */
6018	mvpp2_rx_pkts_coal_set(port, rxq);
6019	mvpp2_rx_time_coal_set(port, rxq);
6020
6021	/* Add number of descriptors ready for receiving packets */
6022	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
6023
6024	return 0;
6025}
6026
6027/* Push packets received by the RXQ to BM pool */
6028static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
6029				struct mvpp2_rx_queue *rxq)
6030{
6031	int rx_received, i;
6032
6033	rx_received = mvpp2_rxq_received(port, rxq->id);
6034	if (!rx_received)
6035		return;
6036
6037	for (i = 0; i < rx_received; i++) {
6038		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6039		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6040		int pool;
6041
6042		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6043			MVPP2_RXD_BM_POOL_ID_OFFS;
6044
6045		mvpp2_bm_pool_put(port, pool,
6046				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
6047				  mvpp2_rxdesc_cookie_get(port, rx_desc));
6048	}
6049	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
6050}
6051
6052/* Cleanup Rx queue */
6053static void mvpp2_rxq_deinit(struct mvpp2_port *port,
6054			     struct mvpp2_rx_queue *rxq)
6055{
6056	int cpu;
6057
6058	mvpp2_rxq_drop_pkts(port, rxq);
6059
6060	if (rxq->descs)
6061		dma_free_coherent(port->dev->dev.parent,
6062				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
6063				  rxq->descs,
6064				  rxq->descs_dma);
6065
6066	rxq->descs             = NULL;
6067	rxq->last_desc         = 0;
6068	rxq->next_desc_to_proc = 0;
6069	rxq->descs_dma         = 0;
6070
6071	/* Clear Rx descriptors queue starting address and size;
6072	 * free descriptor number
6073	 */
6074	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
6075	cpu = get_cpu();
6076	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
6077	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
6078	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
6079	put_cpu();
6080}
6081
6082/* Create and initialize a Tx queue */
6083static int mvpp2_txq_init(struct mvpp2_port *port,
6084			  struct mvpp2_tx_queue *txq)
6085{
6086	u32 val;
6087	int cpu, desc, desc_per_txq, tx_port_num;
6088	struct mvpp2_txq_pcpu *txq_pcpu;
6089
6090	txq->size = port->tx_ring_size;
6091
6092	/* Allocate memory for Tx descriptors */
6093	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
6094				txq->size * MVPP2_DESC_ALIGNED_SIZE,
6095				&txq->descs_dma, GFP_KERNEL);
6096	if (!txq->descs)
6097		return -ENOMEM;
6098
6099	txq->last_desc = txq->size - 1;
6100
6101	/* Set Tx descriptors queue starting address - indirect access */
6102	cpu = get_cpu();
6103	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6104	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
6105			   txq->descs_dma);
6106	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
6107			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
6108	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
6109	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
6110			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
6111	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
6112	val &= ~MVPP2_TXQ_PENDING_MASK;
6113	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
6114
6115	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
6116	 * for each existing TXQ.
6117	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
6118	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
6119	 */
6120	desc_per_txq = 16;
6121	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
6122	       (txq->log_id * desc_per_txq);
6123
6124	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
6125			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
6126			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
6127	put_cpu();
6128
6129	/* WRR / EJP configuration - indirect access */
6130	tx_port_num = mvpp2_egress_port(port);
6131	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
6132
6133	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
6134	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
6135	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
6136	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
6137	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
6138
6139	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
6140	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
6141		    val);
6142
6143	for_each_present_cpu(cpu) {
6144		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6145		txq_pcpu->size = txq->size;
6146		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
6147						sizeof(*txq_pcpu->buffs),
6148						GFP_KERNEL);
6149		if (!txq_pcpu->buffs)
6150			return -ENOMEM;
6151
6152		txq_pcpu->count = 0;
6153		txq_pcpu->reserved_num = 0;
6154		txq_pcpu->txq_put_index = 0;
6155		txq_pcpu->txq_get_index = 0;
6156		txq_pcpu->tso_headers = NULL;
6157
6158		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
6159		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
6160
6161		txq_pcpu->tso_headers =
6162			dma_alloc_coherent(port->dev->dev.parent,
6163					   txq_pcpu->size * TSO_HEADER_SIZE,
6164					   &txq_pcpu->tso_headers_dma,
6165					   GFP_KERNEL);
6166		if (!txq_pcpu->tso_headers)
6167			return -ENOMEM;
6168	}
6169
6170	return 0;
6171}
6172
6173/* Free allocated TXQ resources */
6174static void mvpp2_txq_deinit(struct mvpp2_port *port,
6175			     struct mvpp2_tx_queue *txq)
6176{
6177	struct mvpp2_txq_pcpu *txq_pcpu;
6178	int cpu;
6179
6180	for_each_present_cpu(cpu) {
6181		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6182		kfree(txq_pcpu->buffs);
6183
6184		if (txq_pcpu->tso_headers)
6185			dma_free_coherent(port->dev->dev.parent,
6186					  txq_pcpu->size * TSO_HEADER_SIZE,
6187					  txq_pcpu->tso_headers,
6188					  txq_pcpu->tso_headers_dma);
6189
6190		txq_pcpu->tso_headers = NULL;
6191	}
6192
6193	if (txq->descs)
6194		dma_free_coherent(port->dev->dev.parent,
6195				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
6196				  txq->descs, txq->descs_dma);
6197
6198	txq->descs             = NULL;
6199	txq->last_desc         = 0;
6200	txq->next_desc_to_proc = 0;
6201	txq->descs_dma         = 0;
6202
6203	/* Set minimum bandwidth for disabled TXQs */
6204	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
6205
6206	/* Set Tx descriptors queue starting address and size */
6207	cpu = get_cpu();
6208	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6209	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
6210	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
6211	put_cpu();
6212}
6213
6214/* Cleanup Tx ports */
6215static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
6216{
6217	struct mvpp2_txq_pcpu *txq_pcpu;
6218	int delay, pending, cpu;
6219	u32 val;
6220
6221	cpu = get_cpu();
6222	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6223	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
6224	val |= MVPP2_TXQ_DRAIN_EN_MASK;
6225	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
6226
6227	/* The napi queue has been stopped so wait for all packets
6228	 * to be transmitted.
6229	 */
6230	delay = 0;
6231	do {
6232		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
6233			netdev_warn(port->dev,
6234				    "port %d: cleaning queue %d timed out\n",
6235				    port->id, txq->log_id);
6236			break;
6237		}
6238		mdelay(1);
6239		delay++;
6240
6241		pending = mvpp2_percpu_read(port->priv, cpu,
6242					    MVPP2_TXQ_PENDING_REG);
6243		pending &= MVPP2_TXQ_PENDING_MASK;
6244	} while (pending);
6245
6246	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
6247	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
6248	put_cpu();
6249
6250	for_each_present_cpu(cpu) {
6251		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6252
6253		/* Release all packets */
6254		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
6255
6256		/* Reset queue */
6257		txq_pcpu->count = 0;
6258		txq_pcpu->txq_put_index = 0;
6259		txq_pcpu->txq_get_index = 0;
6260	}
6261}
6262
6263/* Cleanup all Tx queues */
6264static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
6265{
6266	struct mvpp2_tx_queue *txq;
6267	int queue;
6268	u32 val;
6269
6270	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
6271
6272	/* Reset Tx ports and delete Tx queues */
6273	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
6274	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6275
6276	for (queue = 0; queue < port->ntxqs; queue++) {
6277		txq = port->txqs[queue];
6278		mvpp2_txq_clean(port, txq);
6279		mvpp2_txq_deinit(port, txq);
6280	}
6281
6282	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6283
6284	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
6285	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6286}
6287
6288/* Cleanup all Rx queues */
6289static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
6290{
6291	int queue;
6292
6293	for (queue = 0; queue < port->nrxqs; queue++)
6294		mvpp2_rxq_deinit(port, port->rxqs[queue]);
6295}
6296
6297/* Init all Rx queues for port */
6298static int mvpp2_setup_rxqs(struct mvpp2_port *port)
6299{
6300	int queue, err;
6301
6302	for (queue = 0; queue < port->nrxqs; queue++) {
6303		err = mvpp2_rxq_init(port, port->rxqs[queue]);
6304		if (err)
6305			goto err_cleanup;
6306	}
6307	return 0;
6308
6309err_cleanup:
6310	mvpp2_cleanup_rxqs(port);
6311	return err;
6312}
6313
6314/* Init all tx queues for port */
6315static int mvpp2_setup_txqs(struct mvpp2_port *port)
6316{
6317	struct mvpp2_tx_queue *txq;
6318	int queue, err;
6319
6320	for (queue = 0; queue < port->ntxqs; queue++) {
6321		txq = port->txqs[queue];
6322		err = mvpp2_txq_init(port, txq);
6323		if (err)
6324			goto err_cleanup;
6325	}
6326
6327	if (port->has_tx_irqs) {
6328		mvpp2_tx_time_coal_set(port);
6329		for (queue = 0; queue < port->ntxqs; queue++) {
6330			txq = port->txqs[queue];
6331			mvpp2_tx_pkts_coal_set(port, txq);
6332		}
6333	}
6334
6335	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6336	return 0;
6337
6338err_cleanup:
6339	mvpp2_cleanup_txqs(port);
6340	return err;
6341}
6342
6343/* The callback for per-port interrupt */
6344static irqreturn_t mvpp2_isr(int irq, void *dev_id)
6345{
6346	struct mvpp2_queue_vector *qv = dev_id;
6347
6348	mvpp2_qvec_interrupt_disable(qv);
6349
6350	napi_schedule(&qv->napi);
6351
6352	return IRQ_HANDLED;
6353}
6354
6355/* Per-port interrupt for link status changes */
6356static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
6357{
6358	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
6359	struct net_device *dev = port->dev;
6360	bool event = false, link = false;
6361	u32 val;
6362
6363	mvpp22_gop_mask_irq(port);
6364
6365	if (port->gop_id == 0 &&
6366	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
6367		val = readl(port->base + MVPP22_XLG_INT_STAT);
6368		if (val & MVPP22_XLG_INT_STAT_LINK) {
6369			event = true;
6370			val = readl(port->base + MVPP22_XLG_STATUS);
6371			if (val & MVPP22_XLG_STATUS_LINK_UP)
6372				link = true;
6373		}
6374	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
6375		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6376		val = readl(port->base + MVPP22_GMAC_INT_STAT);
6377		if (val & MVPP22_GMAC_INT_STAT_LINK) {
6378			event = true;
6379			val = readl(port->base + MVPP2_GMAC_STATUS0);
6380			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
6381				link = true;
6382		}
6383	}
6384
6385	if (!netif_running(dev) || !event)
6386		goto handled;
6387
6388	if (link) {
6389		mvpp2_interrupts_enable(port);
6390
6391		mvpp2_egress_enable(port);
6392		mvpp2_ingress_enable(port);
6393		netif_carrier_on(dev);
6394		netif_tx_wake_all_queues(dev);
6395	} else {
6396		netif_tx_stop_all_queues(dev);
6397		netif_carrier_off(dev);
6398		mvpp2_ingress_disable(port);
6399		mvpp2_egress_disable(port);
6400
6401		mvpp2_interrupts_disable(port);
6402	}
6403
6404handled:
6405	mvpp22_gop_unmask_irq(port);
6406	return IRQ_HANDLED;
6407}
6408
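/* For RGMII/SGMII ports, force the GMAC speed and duplex to the values
 * resolved by the PHY instead of using in-band autonegotiation.
 */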
6409static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
6410				   struct phy_device *phydev)
6411{
6412	u32 val;
6413
6414	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
6415	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
6416	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
6417	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
6418	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
6419		return;
6420
6421	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6422	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
6423		 MVPP2_GMAC_CONFIG_GMII_SPEED |
6424		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
6425		 MVPP2_GMAC_AN_SPEED_EN |
6426		 MVPP2_GMAC_AN_DUPLEX_EN);
6427
6428	if (phydev->duplex)
6429		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6430
6431	if (phydev->speed == SPEED_1000)
6432		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6433	else if (phydev->speed == SPEED_100)
6434		val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6435
6436	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6437}
6438
6439/* Adjust link */
6440static void mvpp2_link_event(struct net_device *dev)
6441{
6442	struct mvpp2_port *port = netdev_priv(dev);
6443	struct phy_device *phydev = dev->phydev;
6444	bool link_reconfigured = false;
6445	u32 val;
6446
6447	if (phydev->link) {
6448		if (port->phy_interface != phydev->interface && port->comphy) {
6449			/* disable current port for reconfiguration */
6450			mvpp2_interrupts_disable(port);
6451			netif_carrier_off(port->dev);
6452			mvpp2_port_disable(port);
6453			phy_power_off(port->comphy);
6454
6455			/* comphy reconfiguration */
6456			port->phy_interface = phydev->interface;
6457			mvpp22_comphy_init(port);
6458
6459			/* gop/mac reconfiguration */
6460			mvpp22_gop_init(port);
6461			mvpp2_port_mii_set(port);
6462
6463			link_reconfigured = true;
6464		}
6465
6466		if ((port->speed != phydev->speed) ||
6467		    (port->duplex != phydev->duplex)) {
6468			mvpp2_gmac_set_autoneg(port, phydev);
6469
6470			port->duplex = phydev->duplex;
6471			port->speed  = phydev->speed;
6472		}
6473	}
6474
6475	if (phydev->link != port->link || link_reconfigured) {
6476		port->link = phydev->link;
6477
6478		if (phydev->link) {
6479			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
6480			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
6481			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
6482			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
6483			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6484				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6485				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
6486					MVPP2_GMAC_FORCE_LINK_DOWN);
6487				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6488			}
6489
6490			mvpp2_interrupts_enable(port);
6491			mvpp2_port_enable(port);
6492
6493			mvpp2_egress_enable(port);
6494			mvpp2_ingress_enable(port);
6495			netif_carrier_on(dev);
6496			netif_tx_wake_all_queues(dev);
6497		} else {
6498			port->duplex = -1;
6499			port->speed = 0;
6500
6501			netif_tx_stop_all_queues(dev);
6502			netif_carrier_off(dev);
6503			mvpp2_ingress_disable(port);
6504			mvpp2_egress_disable(port);
6505
6506			mvpp2_port_disable(port);
6507			mvpp2_interrupts_disable(port);
6508		}
6509
6510		phy_print_status(phydev);
6511	}
6512}
6513
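/* Arm the per-CPU Tx-done hrtimer if it is not already scheduled */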
6514static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
6515{
6516	ktime_t interval;
6517
6518	if (!port_pcpu->timer_scheduled) {
6519		port_pcpu->timer_scheduled = true;
6520		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
6521		hrtimer_start(&port_pcpu->tx_done_timer, interval,
6522			      HRTIMER_MODE_REL_PINNED);
6523	}
6524}
6525
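/* Deferred Tx-done processing: reclaim completed descriptors on all of the
 * port's Tx queues and re-arm the timer if work remains.
 */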
6526static void mvpp2_tx_proc_cb(unsigned long data)
6527{
6528	struct net_device *dev = (struct net_device *)data;
6529	struct mvpp2_port *port = netdev_priv(dev);
6530	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6531	unsigned int tx_todo, cause;
6532
6533	if (!netif_running(dev))
6534		return;
6535	port_pcpu->timer_scheduled = false;
6536
6537	/* Process all the Tx queues */
6538	cause = (1 << port->ntxqs) - 1;
6539	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
6540
6541	/* Set the timer in case not all the packets were processed */
6542	if (tx_todo)
6543		mvpp2_timer_set(port_pcpu);
6544}
6545
6546static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
6547{
6548	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
6549							 struct mvpp2_port_pcpu,
6550							 tx_done_timer);
6551
6552	tasklet_schedule(&port_pcpu->tx_done_tasklet);
6553
6554	return HRTIMER_NORESTART;
6555}
6556
6557/* Main RX/TX processing routines */
6558
6559/* Display more error info */
6560static void mvpp2_rx_error(struct mvpp2_port *port,
6561			   struct mvpp2_rx_desc *rx_desc)
6562{
6563	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6564	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
6565
6566	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
6567	case MVPP2_RXD_ERR_CRC:
6568		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
6569			   status, sz);
6570		break;
6571	case MVPP2_RXD_ERR_OVERRUN:
6572		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
6573			   status, sz);
6574		break;
6575	case MVPP2_RXD_ERR_RESOURCE:
6576		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
6577			   status, sz);
6578		break;
6579	}
6580}
6581
6582/* Handle RX checksum offload */
6583static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
6584			  struct sk_buff *skb)
6585{
6586	if (((status & MVPP2_RXD_L3_IP4) &&
6587	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
6588	    (status & MVPP2_RXD_L3_IP6))
6589		if (((status & MVPP2_RXD_L4_UDP) ||
6590		     (status & MVPP2_RXD_L4_TCP)) &&
6591		     (status & MVPP2_RXD_L4_CSUM_OK)) {
6592			skb->csum = 0;
6593			skb->ip_summed = CHECKSUM_UNNECESSARY;
6594			return;
6595		}
6596
6597	skb->ip_summed = CHECKSUM_NONE;
6598}
6599
6600/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6601static int mvpp2_rx_refill(struct mvpp2_port *port,
6602			   struct mvpp2_bm_pool *bm_pool, int pool)
6603{
6604	dma_addr_t dma_addr;
6605	phys_addr_t phys_addr;
6606	void *buf;
6607
6608	/* No recycle or too many buffers are in use, so allocate a new skb */
6609	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6610			      GFP_ATOMIC);
6611	if (!buf)
6612		return -ENOMEM;
6613
6614	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6615
6616	return 0;
6617}
6618
6619/* Handle tx checksum */
6620static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6621{
6622	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6623		int ip_hdr_len = 0;
6624		u8 l4_proto;
6625
6626		if (skb->protocol == htons(ETH_P_IP)) {
6627			struct iphdr *ip4h = ip_hdr(skb);
6628
6629			/* Calculate IPv4 checksum and L4 checksum */
6630			ip_hdr_len = ip4h->ihl;
6631			l4_proto = ip4h->protocol;
6632		} else if (skb->protocol == htons(ETH_P_IPV6)) {
6633			struct ipv6hdr *ip6h = ipv6_hdr(skb);
6634
6635			/* Read l4_protocol from one of IPv6 extra headers */
6636			if (skb_network_header_len(skb) > 0)
6637				ip_hdr_len = (skb_network_header_len(skb) >> 2);
6638			l4_proto = ip6h->nexthdr;
6639		} else {
6640			return MVPP2_TXD_L4_CSUM_NOT;
6641		}
6642
6643		return mvpp2_txq_desc_csum(skb_network_offset(skb),
6644				skb->protocol, ip_hdr_len, l4_proto);
6645	}
6646
6647	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
6648}
6649
6650/* Main rx processing */
6651static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
6652		    int rx_todo, struct mvpp2_rx_queue *rxq)
6653{
6654	struct net_device *dev = port->dev;
6655	int rx_received;
6656	int rx_done = 0;
6657	u32 rcvd_pkts = 0;
6658	u32 rcvd_bytes = 0;
6659
6660	/* Get number of received packets and clamp the to-do */
6661	rx_received = mvpp2_rxq_received(port, rxq->id);
6662	if (rx_todo > rx_received)
6663		rx_todo = rx_received;
6664
6665	while (rx_done < rx_todo) {
6666		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6667		struct mvpp2_bm_pool *bm_pool;
6668		struct sk_buff *skb;
6669		unsigned int frag_size;
6670		dma_addr_t dma_addr;
6671		phys_addr_t phys_addr;
6672		u32 rx_status;
6673		int pool, rx_bytes, err;
6674		void *data;
6675
6676		rx_done++;
6677		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
6678		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
6679		rx_bytes -= MVPP2_MH_SIZE;
6680		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
6681		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
6682		data = (void *)phys_to_virt(phys_addr);
6683
6684		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6685			MVPP2_RXD_BM_POOL_ID_OFFS;
6686		bm_pool = &port->priv->bm_pools[pool];
6687
6688		/* In case of an error, release the requested buffer pointer
6689		 * to the Buffer Manager. This request process is controlled
6690		 * by the hardware, and the information about the buffer is
6691		 * carried in the RX descriptor.
6692		 */
6693		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
6694err_drop_frame:
6695			dev->stats.rx_errors++;
6696			mvpp2_rx_error(port, rx_desc);
6697			/* Return the buffer to the pool */
6698			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6699			continue;
6700		}
6701
6702		if (bm_pool->frag_size > PAGE_SIZE)
6703			frag_size = 0;
6704		else
6705			frag_size = bm_pool->frag_size;
6706
6707		skb = build_skb(data, frag_size);
6708		if (!skb) {
6709			netdev_warn(port->dev, "skb build failed\n");
6710			goto err_drop_frame;
6711		}
6712
6713		err = mvpp2_rx_refill(port, bm_pool, pool);
6714		if (err) {
6715			netdev_err(port->dev, "failed to refill BM pools\n");
6716			goto err_drop_frame;
6717		}
6718
6719		dma_unmap_single(dev->dev.parent, dma_addr,
6720				 bm_pool->buf_size, DMA_FROM_DEVICE);
6721
6722		rcvd_pkts++;
6723		rcvd_bytes += rx_bytes;
6724
6725		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
6726		skb_put(skb, rx_bytes);
6727		skb->protocol = eth_type_trans(skb, dev);
6728		mvpp2_rx_csum(port, rx_status, skb);
6729
6730		napi_gro_receive(napi, skb);
6731	}
6732
6733	if (rcvd_pkts) {
6734		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6735
6736		u64_stats_update_begin(&stats->syncp);
6737		stats->rx_packets += rcvd_pkts;
6738		stats->rx_bytes   += rcvd_bytes;
6739		u64_stats_update_end(&stats->syncp);
6740	}
6741
6742	/* Update Rx queue management counters */
6743	wmb();
6744	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
6745
6746	return rx_todo;
6747}
6748
6749static inline void
6750tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
6751		  struct mvpp2_tx_desc *desc)
6752{
6753	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6754
6755	dma_addr_t buf_dma_addr =
6756		mvpp2_txdesc_dma_addr_get(port, desc);
6757	size_t buf_sz =
6758		mvpp2_txdesc_size_get(port, desc);
6759	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6760		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6761				 buf_sz, DMA_TO_DEVICE);
6762	mvpp2_txq_desc_put(txq);
6763}
6764
6765/* Handle tx fragmentation processing */
6766static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
6767				 struct mvpp2_tx_queue *aggr_txq,
6768				 struct mvpp2_tx_queue *txq)
6769{
6770	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6771	struct mvpp2_tx_desc *tx_desc;
6772	int i;
6773	dma_addr_t buf_dma_addr;
6774
6775	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6776		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6777		void *addr = page_address(frag->page.p) + frag->page_offset;
6778
6779		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6780		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6781		mvpp2_txdesc_size_set(port, tx_desc, frag->size);
6782
6783		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
6784					       frag->size,
6785					       DMA_TO_DEVICE);
6786		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
6787			mvpp2_txq_desc_put(txq);
6788			goto cleanup;
6789		}
6790
6791		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6792
6793		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
6794			/* Last descriptor */
6795			mvpp2_txdesc_cmd_set(port, tx_desc,
6796					     MVPP2_TXD_L_DESC);
6797			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6798		} else {
6799			/* Descriptor in the middle: Not First, Not Last */
6800			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6801			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6802		}
6803	}
6804
6805	return 0;
6806cleanup:
6807	/* Release all descriptors that were used to map fragments of
6808	 * this packet, as well as the corresponding DMA mappings
6809	 */
6810	for (i = i - 1; i >= 0; i--) {
6811		tx_desc = txq->descs + i;
6812		tx_desc_unmap_put(port, txq, tx_desc);
6813	}
6814
6815	return -ENOMEM;
6816}
6817
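/* Fill the first descriptor of a TSO segment with the header built in the
 * per-CPU TSO header buffer.
 */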
6818static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
6819				     struct net_device *dev,
6820				     struct mvpp2_tx_queue *txq,
6821				     struct mvpp2_tx_queue *aggr_txq,
6822				     struct mvpp2_txq_pcpu *txq_pcpu,
6823				     int hdr_sz)
6824{
6825	struct mvpp2_port *port = netdev_priv(dev);
6826	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6827	dma_addr_t addr;
6828
6829	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6830	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
6831
6832	addr = txq_pcpu->tso_headers_dma +
6833	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6834	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
6835
6836	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
6837					    MVPP2_TXD_F_DESC |
6838					    MVPP2_TXD_PADDING_DISABLE);
6839	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6840}
6841
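/* Map one chunk of TSO payload and fill a descriptor for it. The last chunk
 * of a segment gets the Last-descriptor flag, and the last chunk of the skb
 * also keeps the skb reference for Tx completion.
 */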
6842static inline int mvpp2_tso_put_data(struct sk_buff *skb,
6843				     struct net_device *dev, struct tso_t *tso,
6844				     struct mvpp2_tx_queue *txq,
6845				     struct mvpp2_tx_queue *aggr_txq,
6846				     struct mvpp2_txq_pcpu *txq_pcpu,
6847				     int sz, bool left, bool last)
6848{
6849	struct mvpp2_port *port = netdev_priv(dev);
6850	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6851	dma_addr_t buf_dma_addr;
6852
6853	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6854	mvpp2_txdesc_size_set(port, tx_desc, sz);
6855
6856	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
6857				      DMA_TO_DEVICE);
6858	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6859		mvpp2_txq_desc_put(txq);
6860		return -ENOMEM;
6861	}
6862
6863	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6864
6865	if (!left) {
6866		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
6867		if (last) {
6868			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6869			return 0;
6870		}
6871	} else {
6872		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6873	}
6874
6875	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6876	return 0;
6877}
6878
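/* Software TSO: for each MSS-sized segment, emit a header descriptor
 * followed by one or more payload descriptors.
 */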
6879static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
6880			struct mvpp2_tx_queue *txq,
6881			struct mvpp2_tx_queue *aggr_txq,
6882			struct mvpp2_txq_pcpu *txq_pcpu)
6883{
6884	struct mvpp2_port *port = netdev_priv(dev);
6885	struct tso_t tso;
6886	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
6887	int i, len, descs = 0;
6888
6889	/* Check number of available descriptors */
6890	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
6891				      tso_count_descs(skb)) ||
6892	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
6893					     tso_count_descs(skb)))
6894		return 0;
6895
6896	tso_start(skb, &tso);
6897	len = skb->len - hdr_sz;
6898	while (len > 0) {
6899		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
6900		char *hdr = txq_pcpu->tso_headers +
6901			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6902
6903		len -= left;
6904		descs++;
6905
6906		tso_build_hdr(skb, hdr, &tso, left, len == 0);
6907		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
6908
6909		while (left > 0) {
6910			int sz = min_t(int, tso.size, left);
6911			left -= sz;
6912			descs++;
6913
6914			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
6915					       txq_pcpu, sz, left, len == 0))
6916				goto release;
6917			tso_build_data(skb, &tso, sz);
6918		}
6919	}
6920
6921	return descs;
6922
6923release:
6924	for (i = descs - 1; i >= 0; i--) {
6925		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
6926		tx_desc_unmap_put(port, txq, tx_desc);
6927	}
6928	return 0;
6929}
6930
6931/* Main tx processing */
6932static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6933{
6934	struct mvpp2_port *port = netdev_priv(dev);
6935	struct mvpp2_tx_queue *txq, *aggr_txq;
6936	struct mvpp2_txq_pcpu *txq_pcpu;
6937	struct mvpp2_tx_desc *tx_desc;
6938	dma_addr_t buf_dma_addr;
6939	int frags = 0;
6940	u16 txq_id;
6941	u32 tx_cmd;
6942
6943	txq_id = skb_get_queue_mapping(skb);
6944	txq = port->txqs[txq_id];
6945	txq_pcpu = this_cpu_ptr(txq->pcpu);
6946	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6947
6948	if (skb_is_gso(skb)) {
6949		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
6950		goto out;
6951	}
6952	frags = skb_shinfo(skb)->nr_frags + 1;
6953
6954	/* Check number of available descriptors */
6955	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6956	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6957					     txq_pcpu, frags)) {
6958		frags = 0;
6959		goto out;
6960	}
6961
6962	/* Get a descriptor for the first part of the packet */
6963	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6964	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6965	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
6966
6967	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
6968				      skb_headlen(skb), DMA_TO_DEVICE);
6969	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6970		mvpp2_txq_desc_put(txq);
6971		frags = 0;
6972		goto out;
6973	}
6974
6975	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6976
6977	tx_cmd = mvpp2_skb_tx_csum(port, skb);
6978
6979	if (frags == 1) {
6980		/* First and Last descriptor */
6981		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
6982		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6983		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6984	} else {
6985		/* First but not Last */
6986		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
6987		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6988		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6989
6990		/* Continue with other skb fragments */
6991		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
6992			tx_desc_unmap_put(port, txq, tx_desc);
6993			frags = 0;
6994		}
6995	}
6996
6997out:
6998	if (frags > 0) {
6999		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
7000		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
7001
7002		txq_pcpu->reserved_num -= frags;
7003		txq_pcpu->count += frags;
7004		aggr_txq->count += frags;
7005
7006		/* Enable transmit */
7007		wmb();
7008		mvpp2_aggr_txq_pend_desc_add(port, frags);
7009
7010		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
7011			netif_tx_stop_queue(nq);
7012
7013		u64_stats_update_begin(&stats->syncp);
7014		stats->tx_packets++;
7015		stats->tx_bytes += skb->len;
7016		u64_stats_update_end(&stats->syncp);
7017	} else {
7018		dev->stats.tx_dropped++;
7019		dev_kfree_skb_any(skb);
7020	}
7021
7022	/* Finalize TX processing */
7023	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
7024		mvpp2_txq_done(port, txq, txq_pcpu);
7025
7026	/* Set the timer in case not all frags were processed */
7027	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
7028	    txq_pcpu->count > 0) {
7029		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
7030
7031		mvpp2_timer_set(port_pcpu);
7032	}
7033
7034	return NETDEV_TX_OK;
7035}
7036
7037static inline void mvpp2_cause_error(struct net_device *dev, int cause)
7038{
7039	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
7040		netdev_err(dev, "FCS error\n");
7041	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
7042		netdev_err(dev, "rx fifo overrun error\n");
7043	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
7044		netdev_err(dev, "tx fifo underrun error\n");
7045}
7046
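/* NAPI poll handler: read the queue vector's Rx/Tx cause register, report
 * errors, handle Tx-done work, then process Rx packets within the budget.
 */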
7047static int mvpp2_poll(struct napi_struct *napi, int budget)
7048{
7049	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
7050	int rx_done = 0;
7051	struct mvpp2_port *port = netdev_priv(napi->dev);
7052	struct mvpp2_queue_vector *qv;
7053	int cpu = smp_processor_id();
7054
7055	qv = container_of(napi, struct mvpp2_queue_vector, napi);
7056
7057	/* Rx/Tx cause register
7058	 *
7059	 * Bits 0-15: each bit indicates received packets on the Rx queue
7060	 * (bit 0 is for Rx queue 0).
7061	 *
7062	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
7063	 * (bit 16 is for Tx queue 0).
7064	 *
7065	 * Each CPU has its own Rx/Tx cause register
7066	 */
7067	cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
7068						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
7069
7070	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
7071	if (cause_misc) {
7072		mvpp2_cause_error(port->dev, cause_misc);
7073
7074		/* Clear the cause register */
7075		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
7076		mvpp2_percpu_write(port->priv, cpu,
7077				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
7078				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
7079	}
7080
7081	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
7082	if (cause_tx) {
7083		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
7084		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
7085	}
7086
7087	/* Process RX packets */
7088	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
7089	cause_rx <<= qv->first_rxq;
7090	cause_rx |= qv->pending_cause_rx;
7091	while (cause_rx && budget > 0) {
7092		int count;
7093		struct mvpp2_rx_queue *rxq;
7094
7095		rxq = mvpp2_get_rx_queue(port, cause_rx);
7096		if (!rxq)
7097			break;
7098
7099		count = mvpp2_rx(port, napi, budget, rxq);
7100		rx_done += count;
7101		budget -= count;
7102		if (budget > 0) {
7103			/* Clear the bit associated with this Rx queue
7104			 * so that next iteration will continue from
7105			 * the next Rx queue.
7106			 */
7107			cause_rx &= ~(1 << rxq->logic_rxq);
7108		}
7109	}
7110
7111	if (budget > 0) {
7112		cause_rx = 0;
7113		napi_complete_done(napi, rx_done);
7114
7115		mvpp2_qvec_interrupt_enable(qv);
7116	}
7117	qv->pending_cause_rx = cause_rx;
7118	return rx_done;
7119}
7120
7121/* Set hw internals when starting port */
7122static void mvpp2_start_dev(struct mvpp2_port *port)
7123{
7124	struct net_device *ndev = port->dev;
7125	int i;
7126
7127	if (port->gop_id == 0 &&
7128	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
7129	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
7130		mvpp2_xlg_max_rx_size_set(port);
7131	else
7132		mvpp2_gmac_max_rx_size_set(port);
7133
7134	mvpp2_txp_max_tx_size_set(port);
7135
7136	for (i = 0; i < port->nqvecs; i++)
7137		napi_enable(&port->qvecs[i].napi);
7138
7139	/* Enable interrupts on all CPUs */
7140	mvpp2_interrupts_enable(port);
7141
7142	if (port->priv->hw_version == MVPP22) {
7143		mvpp22_comphy_init(port);
7144		mvpp22_gop_init(port);
7145	}
7146
7147	mvpp2_port_mii_set(port);
7148	mvpp2_port_enable(port);
7149	if (ndev->phydev)
7150		phy_start(ndev->phydev);
7151	netif_tx_start_all_queues(port->dev);
7152}
7153
7154/* Set hw internals when stopping port */
7155static void mvpp2_stop_dev(struct mvpp2_port *port)
7156{
7157	struct net_device *ndev = port->dev;
7158	int i;
7159
7160	/* Stop new packets from arriving in the RXQs */
7161	mvpp2_ingress_disable(port);
7162
7163	mdelay(10);
7164
7165	/* Disable interrupts on all CPUs */
7166	mvpp2_interrupts_disable(port);
7167
7168	for (i = 0; i < port->nqvecs; i++)
7169		napi_disable(&port->qvecs[i].napi);
7170
7171	netif_carrier_off(port->dev);
7172	netif_tx_stop_all_queues(port->dev);
7173
7174	mvpp2_egress_disable(port);
7175	mvpp2_port_disable(port);
7176	if (ndev->phydev)
7177		phy_stop(ndev->phydev);
7178	phy_power_off(port->comphy);
7179}
7180
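/* Clamp and align the requested Rx/Tx ring sizes to the hardware limits */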
7181static int mvpp2_check_ringparam_valid(struct net_device *dev,
7182				       struct ethtool_ringparam *ring)
7183{
7184	u16 new_rx_pending = ring->rx_pending;
7185	u16 new_tx_pending = ring->tx_pending;
7186
7187	if (ring->rx_pending == 0 || ring->tx_pending == 0)
7188		return -EINVAL;
7189
7190	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
7191		new_rx_pending = MVPP2_MAX_RXD_MAX;
7192	else if (!IS_ALIGNED(ring->rx_pending, 16))
7193		new_rx_pending = ALIGN(ring->rx_pending, 16);
7194
7195	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
7196		new_tx_pending = MVPP2_MAX_TXD_MAX;
7197	else if (!IS_ALIGNED(ring->tx_pending, 32))
7198		new_tx_pending = ALIGN(ring->tx_pending, 32);
7199
7200	/* The Tx ring size cannot be smaller than the minimum number of
7201	 * descriptors needed for TSO.
7202	 */
7203	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
7204		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
7205
7206	if (ring->rx_pending != new_rx_pending) {
7207		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
7208			    ring->rx_pending, new_rx_pending);
7209		ring->rx_pending = new_rx_pending;
7210	}
7211
7212	if (ring->tx_pending != new_tx_pending) {
7213		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
7214			    ring->tx_pending, new_tx_pending);
7215		ring->tx_pending = new_tx_pending;
7216	}
7217
7218	return 0;
7219}
7220
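/* Read the MAC address programmed in the PPv2.1 GMAC/LMS registers */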
7221static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
7222{
7223	u32 mac_addr_l, mac_addr_m, mac_addr_h;
7224
7225	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
7226	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
7227	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
7228	addr[0] = (mac_addr_h >> 24) & 0xFF;
7229	addr[1] = (mac_addr_h >> 16) & 0xFF;
7230	addr[2] = (mac_addr_h >> 8) & 0xFF;
7231	addr[3] = mac_addr_h & 0xFF;
7232	addr[4] = mac_addr_m & 0xFF;
7233	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
7234}
7235
7236static int mvpp2_phy_connect(struct mvpp2_port *port)
7237{
7238	struct phy_device *phy_dev;
7239
7240	/* No PHY is attached */
7241	if (!port->phy_node)
7242		return 0;
7243
7244	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
7245				 port->phy_interface);
7246	if (!phy_dev) {
7247		netdev_err(port->dev, "cannot connect to phy\n");
7248		return -ENODEV;
7249	}
7250	phy_dev->supported &= PHY_GBIT_FEATURES;
7251	phy_dev->advertising = phy_dev->supported;
7252
7253	port->link    = 0;
7254	port->duplex  = 0;
7255	port->speed   = 0;
7256
7257	return 0;
7258}
7259
7260static void mvpp2_phy_disconnect(struct mvpp2_port *port)
7261{
7262	struct net_device *ndev = port->dev;
7263
7264	if (!ndev->phydev)
7265		return;
7266
7267	phy_disconnect(ndev->phydev);
7268}
7269
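/* Request the queue vector interrupts; private (per-CPU) vectors are
 * excluded from IRQ balancing and their affinity is hinted to their CPU.
 */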
7270static int mvpp2_irqs_init(struct mvpp2_port *port)
7271{
7272	int err, i;
7273
7274	for (i = 0; i < port->nqvecs; i++) {
7275		struct mvpp2_queue_vector *qv = port->qvecs + i;
7276
7277		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7278			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
7279
7280		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
7281		if (err)
7282			goto err;
7283
7284		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7285			irq_set_affinity_hint(qv->irq,
7286					      cpumask_of(qv->sw_thread_id));
7287	}
7288
7289	return 0;
7290err:
7291	for (i = 0; i < port->nqvecs; i++) {
7292		struct mvpp2_queue_vector *qv = port->qvecs + i;
7293
7294		irq_set_affinity_hint(qv->irq, NULL);
7295		free_irq(qv->irq, qv);
7296	}
7297
7298	return err;
7299}
7300
7301static void mvpp2_irqs_deinit(struct mvpp2_port *port)
7302{
7303	int i;
7304
7305	for (i = 0; i < port->nqvecs; i++) {
7306		struct mvpp2_queue_vector *qv = port->qvecs + i;
7307
7308		irq_set_affinity_hint(qv->irq, NULL);
7309		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
7310		free_irq(qv->irq, qv);
7311	}
7312}
7313
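/* Default RSS setup: point every classifier Rx queue at RSS table 0 and
 * spread the table entries evenly across the port's Rx queues.
 */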
7314static void mvpp22_init_rss(struct mvpp2_port *port)
7315{
7316	struct mvpp2 *priv = port->priv;
7317	int i;
7318
7319	/* Set the table width: the whole classifier Rx queue number is
7320	 * replaced by the one configured in the RSS table entries.
7321	 */
7322	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
7323	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
7324
7325	/* Loop through the classifier Rx Queues and map them to an RSS table.
7326	 * Map them all to the first table (0) by default.
7327	 */
7328	for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
7329		mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
7330		mvpp2_write(priv, MVPP22_RSS_TABLE,
7331			    MVPP22_RSS_TABLE_POINTER(0));
7332	}
7333
7334	/* Configure the first table to evenly distribute the packets across
7335	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
7336	 */
7337	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
7338		u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
7339			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
7340		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
7341
7342		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
7343	}
7344
7345}
7346
7347static int mvpp2_open(struct net_device *dev)
7348{
7349	struct mvpp2_port *port = netdev_priv(dev);
7350	struct mvpp2 *priv = port->priv;
7351	unsigned char mac_bcast[ETH_ALEN] = {
7352			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
7353	int err;
7354
7355	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
7356	if (err) {
7357		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
7358		return err;
7359	}
7360	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
7361	if (err) {
7362		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
7363		return err;
7364	}
7365	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
7366	if (err) {
7367		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
7368		return err;
7369	}
7370	err = mvpp2_prs_def_flow(port);
7371	if (err) {
7372		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
7373		return err;
7374	}
7375
7376	/* Allocate the Rx/Tx queues */
7377	err = mvpp2_setup_rxqs(port);
7378	if (err) {
7379		netdev_err(port->dev, "cannot allocate Rx queues\n");
7380		return err;
7381	}
7382
7383	err = mvpp2_setup_txqs(port);
7384	if (err) {
7385		netdev_err(port->dev, "cannot allocate Tx queues\n");
7386		goto err_cleanup_rxqs;
7387	}
7388
7389	err = mvpp2_irqs_init(port);
7390	if (err) {
7391		netdev_err(port->dev, "cannot init IRQs\n");
7392		goto err_cleanup_txqs;
7393	}
7394
7395	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
7396		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
7397				  dev->name, port);
7398		if (err) {
7399			netdev_err(port->dev, "cannot request link IRQ %d\n",
7400				   port->link_irq);
7401			goto err_free_irq;
7402		}
7403
7404		mvpp22_gop_setup_irq(port);
7405	}
7406
7407	/* By default, the link is down */
7408	netif_carrier_off(port->dev);
7409
7410	err = mvpp2_phy_connect(port);
7411	if (err < 0)
7412		goto err_free_link_irq;
7413
7414	/* Unmask interrupts on all CPUs */
7415	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
7416	mvpp2_shared_interrupt_mask_unmask(port, false);
7417
7418	mvpp2_start_dev(port);
7419
7420	if (priv->hw_version == MVPP22)
7421		mvpp22_init_rss(port);
7422
7423	/* Start hardware statistics gathering */
7424	queue_delayed_work(priv->stats_queue, &port->stats_work,
7425			   MVPP2_MIB_COUNTERS_STATS_DELAY);
7426
7427	return 0;
7428
7429err_free_link_irq:
7430	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7431		free_irq(port->link_irq, port);
7432err_free_irq:
7433	mvpp2_irqs_deinit(port);
7434err_cleanup_txqs:
7435	mvpp2_cleanup_txqs(port);
7436err_cleanup_rxqs:
7437	mvpp2_cleanup_rxqs(port);
7438	return err;
7439}
7440
7441static int mvpp2_stop(struct net_device *dev)
7442{
7443	struct mvpp2_port *port = netdev_priv(dev);
7444	struct mvpp2_port_pcpu *port_pcpu;
7445	struct mvpp2 *priv = port->priv;
7446	int cpu;
7447
7448	mvpp2_stop_dev(port);
7449	mvpp2_phy_disconnect(port);
7450
7451	/* Mask interrupts on all CPUs */
7452	on_each_cpu(mvpp2_interrupts_mask, port, 1);
7453	mvpp2_shared_interrupt_mask_unmask(port, true);
7454
7455	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7456		free_irq(port->link_irq, port);
7457
7458	mvpp2_irqs_deinit(port);
7459	if (!port->has_tx_irqs) {
7460		for_each_present_cpu(cpu) {
7461			port_pcpu = per_cpu_ptr(port->pcpu, cpu);
7462
7463			hrtimer_cancel(&port_pcpu->tx_done_timer);
7464			port_pcpu->timer_scheduled = false;
7465			tasklet_kill(&port_pcpu->tx_done_tasklet);
7466		}
7467	}
7468	mvpp2_cleanup_rxqs(port);
7469	mvpp2_cleanup_txqs(port);
7470
7471	cancel_delayed_work_sync(&port->stats_work);
7472
7473	return 0;
7474}
7475
7476static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
7477					struct netdev_hw_addr_list *list)
7478{
7479	struct netdev_hw_addr *ha;
7480	int ret;
7481
7482	netdev_hw_addr_list_for_each(ha, list) {
7483		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
7484		if (ret)
7485			return ret;
7486	}
7487
7488	return 0;
7489}
7490
7491static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
7492{
7493	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
7494		mvpp2_prs_vid_enable_filtering(port);
7495	else
7496		mvpp2_prs_vid_disable_filtering(port);
7497
7498	mvpp2_prs_mac_promisc_set(port->priv, port->id,
7499				  MVPP2_PRS_L2_UNI_CAST, enable);
7500
7501	mvpp2_prs_mac_promisc_set(port->priv, port->id,
7502				  MVPP2_PRS_L2_MULTI_CAST, enable);
7503}
7504
7505static void mvpp2_set_rx_mode(struct net_device *dev)
7506{
7507	struct mvpp2_port *port = netdev_priv(dev);
7508
7509	/* Clear the whole UC and MC list */
7510	mvpp2_prs_mac_del_all(port);
7511
7512	if (dev->flags & IFF_PROMISC) {
7513		mvpp2_set_rx_promisc(port, true);
7514		return;
7515	}
7516
7517	mvpp2_set_rx_promisc(port, false);
7518
7519	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
7520	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
7521		mvpp2_prs_mac_promisc_set(port->priv, port->id,
7522					  MVPP2_PRS_L2_UNI_CAST, true);
7523
7524	if (dev->flags & IFF_ALLMULTI) {
7525		mvpp2_prs_mac_promisc_set(port->priv, port->id,
7526					  MVPP2_PRS_L2_MULTI_CAST, true);
7527		return;
7528	}
7529
7530	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
7531	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
7532		mvpp2_prs_mac_promisc_set(port->priv, port->id,
7533					  MVPP2_PRS_L2_MULTI_CAST, true);
7534}
7535
7536static int mvpp2_set_mac_address(struct net_device *dev, void *p)
7537{
7538	struct mvpp2_port *port = netdev_priv(dev);
7539	const struct sockaddr *addr = p;
7540	int err;
7541
7542	if (!is_valid_ether_addr(addr->sa_data)) {
7543		err = -EADDRNOTAVAIL;
7544		goto log_error;
7545	}
7546
7547	if (!netif_running(dev)) {
7548		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7549		if (!err)
7550			return 0;
7551		/* Reconfigure parser to accept the original MAC address */
7552		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7553		if (err)
7554			goto log_error;
7555	}
7556
7557	mvpp2_stop_dev(port);
7558
7559	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7560	if (!err)
7561		goto out_start;
7562
7563	/* Reconfigure parser to accept the original MAC address */
7564	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7565	if (err)
7566		goto log_error;
7567out_start:
7568	mvpp2_start_dev(port);
7569	mvpp2_egress_enable(port);
7570	mvpp2_ingress_enable(port);
7571	return 0;
7572log_error:
7573	netdev_err(dev, "failed to change MAC address\n");
7574	return err;
7575}
7576
7577static int mvpp2_change_mtu(struct net_device *dev, int mtu)
7578{
7579	struct mvpp2_port *port = netdev_priv(dev);
7580	int err;
7581
7582	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
7583		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
7584			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
7585		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
7586	}
7587
7588	if (!netif_running(dev)) {
7589		err = mvpp2_bm_update_mtu(dev, mtu);
7590		if (!err) {
7591			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7592			return 0;
7593		}
7594
7595		/* Reconfigure BM to the original MTU */
7596		err = mvpp2_bm_update_mtu(dev, dev->mtu);
7597		if (err)
7598			goto log_error;
7599	}
7600
7601	mvpp2_stop_dev(port);
7602
7603	err = mvpp2_bm_update_mtu(dev, mtu);
7604	if (!err) {
7605		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7606		goto out_start;
7607	}
7608
7609	/* Reconfigure BM to the original MTU */
7610	err = mvpp2_bm_update_mtu(dev, dev->mtu);
7611	if (err)
7612		goto log_error;
7613
7614out_start:
7615	mvpp2_start_dev(port);
7616	mvpp2_egress_enable(port);
7617	mvpp2_ingress_enable(port);
7618
7619	return 0;
7620log_error:
7621	netdev_err(dev, "failed to change MTU\n");
7622	return err;
7623}
7624
7625static void
7626mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7627{
7628	struct mvpp2_port *port = netdev_priv(dev);
7629	unsigned int start;
7630	int cpu;
7631
7632	for_each_possible_cpu(cpu) {
7633		struct mvpp2_pcpu_stats *cpu_stats;
7634		u64 rx_packets;
7635		u64 rx_bytes;
7636		u64 tx_packets;
7637		u64 tx_bytes;
7638
7639		cpu_stats = per_cpu_ptr(port->stats, cpu);
7640		do {
7641			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7642			rx_packets = cpu_stats->rx_packets;
7643			rx_bytes   = cpu_stats->rx_bytes;
7644			tx_packets = cpu_stats->tx_packets;
7645			tx_bytes   = cpu_stats->tx_bytes;
7646		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7647
7648		stats->rx_packets += rx_packets;
7649		stats->rx_bytes   += rx_bytes;
7650		stats->tx_packets += tx_packets;
7651		stats->tx_bytes   += tx_bytes;
7652	}
7653
7654	stats->rx_errors	= dev->stats.rx_errors;
7655	stats->rx_dropped	= dev->stats.rx_dropped;
7656	stats->tx_dropped	= dev->stats.tx_dropped;
7657}
7658
7659static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7660{
7661	int ret;
7662
7663	if (!dev->phydev)
7664		return -ENOTSUPP;
7665
7666	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
7667	if (!ret)
7668		mvpp2_link_event(dev);
7669
7670	return ret;
7671}
7672
7673static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7674{
7675	struct mvpp2_port *port = netdev_priv(dev);
7676	int ret;
7677
7678	ret = mvpp2_prs_vid_entry_add(port, vid);
7679	if (ret)
7680		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7681			   MVPP2_PRS_VLAN_FILT_MAX - 1);
7682	return ret;
7683}
7684
7685static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7686{
7687	struct mvpp2_port *port = netdev_priv(dev);
7688
7689	mvpp2_prs_vid_entry_remove(port, vid);
7690	return 0;
7691}
7692
7693static int mvpp2_set_features(struct net_device *dev,
7694			      netdev_features_t features)
7695{
7696	netdev_features_t changed = dev->features ^ features;
7697	struct mvpp2_port *port = netdev_priv(dev);
7698
7699	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7700		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7701			mvpp2_prs_vid_enable_filtering(port);
7702		} else {
7703			/* Invalidate all registered VID filters for this
7704			 * port
7705			 */
7706			mvpp2_prs_vid_remove_all(port);
7707
7708			mvpp2_prs_vid_disable_filtering(port);
7709		}
7710	}
7711
7712	return 0;
7713}
7714
7715/* Ethtool methods */
7716
7717	/* Set interrupt coalescing for ethtool */
7718static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7719				      struct ethtool_coalesce *c)
7720{
7721	struct mvpp2_port *port = netdev_priv(dev);
7722	int queue;
7723
7724	for (queue = 0; queue < port->nrxqs; queue++) {
7725		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7726
7727		rxq->time_coal = c->rx_coalesce_usecs;
7728		rxq->pkts_coal = c->rx_max_coalesced_frames;
7729		mvpp2_rx_pkts_coal_set(port, rxq);
7730		mvpp2_rx_time_coal_set(port, rxq);
7731	}
7732
7733	if (port->has_tx_irqs) {
7734		port->tx_time_coal = c->tx_coalesce_usecs;
7735		mvpp2_tx_time_coal_set(port);
7736	}
7737
7738	for (queue = 0; queue < port->ntxqs; queue++) {
7739		struct mvpp2_tx_queue *txq = port->txqs[queue];
7740
7741		txq->done_pkts_coal = c->tx_max_coalesced_frames;
7742
7743		if (port->has_tx_irqs)
7744			mvpp2_tx_pkts_coal_set(port, txq);
7745	}
7746
7747	return 0;
7748}
7749
7750	/* Get interrupt coalescing for ethtool */
7751static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7752				      struct ethtool_coalesce *c)
7753{
7754	struct mvpp2_port *port = netdev_priv(dev);
7755
7756	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
7757	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7758	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
7759	c->tx_coalesce_usecs       = port->tx_time_coal;
7760	return 0;
7761}
7762
7763static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7764				      struct ethtool_drvinfo *drvinfo)
7765{
7766	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7767		sizeof(drvinfo->driver));
7768	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7769		sizeof(drvinfo->version));
7770	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7771		sizeof(drvinfo->bus_info));
7772}
7773
7774static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7775					struct ethtool_ringparam *ring)
7776{
7777	struct mvpp2_port *port = netdev_priv(dev);
7778
7779	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7780	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
7781	ring->rx_pending = port->rx_ring_size;
7782	ring->tx_pending = port->tx_ring_size;
7783}
7784
7785static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7786				       struct ethtool_ringparam *ring)
7787{
7788	struct mvpp2_port *port = netdev_priv(dev);
7789	u16 prev_rx_ring_size = port->rx_ring_size;
7790	u16 prev_tx_ring_size = port->tx_ring_size;
7791	int err;
7792
7793	err = mvpp2_check_ringparam_valid(dev, ring);
7794	if (err)
7795		return err;
7796
7797	if (!netif_running(dev)) {
7798		port->rx_ring_size = ring->rx_pending;
7799		port->tx_ring_size = ring->tx_pending;
7800		return 0;
7801	}
7802
7803	/* The interface is running, so we have to force a
7804	 * reallocation of the queues
7805	 */
7806	mvpp2_stop_dev(port);
7807	mvpp2_cleanup_rxqs(port);
7808	mvpp2_cleanup_txqs(port);
7809
7810	port->rx_ring_size = ring->rx_pending;
7811	port->tx_ring_size = ring->tx_pending;
7812
7813	err = mvpp2_setup_rxqs(port);
7814	if (err) {
7815		/* Reallocate Rx queues with the original ring size */
7816		port->rx_ring_size = prev_rx_ring_size;
7817		ring->rx_pending = prev_rx_ring_size;
7818		err = mvpp2_setup_rxqs(port);
7819		if (err)
7820			goto err_out;
7821	}
7822	err = mvpp2_setup_txqs(port);
7823	if (err) {
7824		/* Reallocate Tx queues with the original ring size */
7825		port->tx_ring_size = prev_tx_ring_size;
7826		ring->tx_pending = prev_tx_ring_size;
7827		err = mvpp2_setup_txqs(port);
7828		if (err)
7829			goto err_clean_rxqs;
7830	}
7831
7832	mvpp2_start_dev(port);
7833	mvpp2_egress_enable(port);
7834	mvpp2_ingress_enable(port);
7835
7836	return 0;
7837
7838err_clean_rxqs:
7839	mvpp2_cleanup_rxqs(port);
7840err_out:
7841	netdev_err(dev, "failed to change ring parameters\n");
7842	return err;
7843}
7844
7845/* Device ops */
7846
7847static const struct net_device_ops mvpp2_netdev_ops = {
7848	.ndo_open		= mvpp2_open,
7849	.ndo_stop		= mvpp2_stop,
7850	.ndo_start_xmit		= mvpp2_tx,
7851	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
7852	.ndo_set_mac_address	= mvpp2_set_mac_address,
7853	.ndo_change_mtu		= mvpp2_change_mtu,
7854	.ndo_get_stats64	= mvpp2_get_stats64,
7855	.ndo_do_ioctl		= mvpp2_ioctl,
7856	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
7857	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
7858	.ndo_set_features	= mvpp2_set_features,
7859};
7860
7861static const struct ethtool_ops mvpp2_eth_tool_ops = {
7862	.nway_reset	= phy_ethtool_nway_reset,
7863	.get_link	= ethtool_op_get_link,
7864	.set_coalesce	= mvpp2_ethtool_set_coalesce,
7865	.get_coalesce	= mvpp2_ethtool_get_coalesce,
7866	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
7867	.get_ringparam	= mvpp2_ethtool_get_ringparam,
7868	.set_ringparam	= mvpp2_ethtool_set_ringparam,
7869	.get_strings	= mvpp2_ethtool_get_strings,
7870	.get_ethtool_stats = mvpp2_ethtool_get_stats,
7871	.get_sset_count	= mvpp2_ethtool_get_sset_count,
7872	.get_link_ksettings = phy_ethtool_get_link_ksettings,
7873	.set_link_ksettings = phy_ethtool_set_link_ksettings,
7874};
7875
7876/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7877 * had a single IRQ defined per-port.
7878 */
7879static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7880					   struct device_node *port_node)
7881{
7882	struct mvpp2_queue_vector *v = &port->qvecs[0];
7883
7884	v->first_rxq = 0;
7885	v->nrxqs = port->nrxqs;
7886	v->type = MVPP2_QUEUE_VECTOR_SHARED;
7887	v->sw_thread_id = 0;
7888	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7889	v->port = port;
7890	v->irq = irq_of_parse_and_map(port_node, 0);
7891	if (v->irq <= 0)
7892		return -EINVAL;
7893	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7894		       NAPI_POLL_WEIGHT);
7895
7896	port->nqvecs = 1;
7897
7898	return 0;
7899}
7900
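/* Multi-queue mode: one private queue vector per possible CPU, plus a
 * shared Rx vector when the single queue distribution mode is used.
 */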
7901static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7902					  struct device_node *port_node)
7903{
7904	struct mvpp2_queue_vector *v;
7905	int i, ret;
7906
7907	port->nqvecs = num_possible_cpus();
7908	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7909		port->nqvecs += 1;
7910
7911	for (i = 0; i < port->nqvecs; i++) {
7912		char irqname[16];
7913
7914		v = port->qvecs + i;
7915
7916		v->port = port;
7917		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7918		v->sw_thread_id = i;
7919		v->sw_thread_mask = BIT(i);
7920
7921		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7922
7923		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7924			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7925			v->nrxqs = MVPP2_DEFAULT_RXQ;
7926		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7927			   i == (port->nqvecs - 1)) {
7928			v->first_rxq = 0;
7929			v->nrxqs = port->nrxqs;
7930			v->type = MVPP2_QUEUE_VECTOR_SHARED;
7931			strncpy(irqname, "rx-shared", sizeof(irqname));
7932		}
7933
7934		if (port_node)
7935			v->irq = of_irq_get_byname(port_node, irqname);
7936		else
7937			v->irq = fwnode_irq_get(port->fwnode, i);
7938		if (v->irq <= 0) {
7939			ret = -EINVAL;
7940			goto err;
7941		}
7942
7943		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7944			       NAPI_POLL_WEIGHT);
7945	}
7946
7947	return 0;
7948
7949err:
7950	for (i = 0; i < port->nqvecs; i++)
7951		irq_dispose_mapping(port->qvecs[i].irq);
7952	return ret;
7953}
7954
7955static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7956				    struct device_node *port_node)
7957{
7958	if (port->has_tx_irqs)
7959		return mvpp2_multi_queue_vectors_init(port, port_node);
7960	else
7961		return mvpp2_simple_queue_vectors_init(port, port_node);
7962}
7963
7964static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7965{
7966	int i;
7967
7968	for (i = 0; i < port->nqvecs; i++)
7969		irq_dispose_mapping(port->qvecs[i].irq);
7970}
7971
7972/* Configure Rx queue group interrupt for this port */
7973static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7974{
7975	struct mvpp2 *priv = port->priv;
7976	u32 val;
7977	int i;
7978
7979	if (priv->hw_version == MVPP21) {
7980		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7981			    port->nrxqs);
7982		return;
7983	}
7984
7985	/* Handle the more complicated PPv2.2 case */
7986	for (i = 0; i < port->nqvecs; i++) {
7987		struct mvpp2_queue_vector *qv = port->qvecs + i;
7988
7989		if (!qv->nrxqs)
7990			continue;
7991
7992		val = qv->sw_thread_id;
7993		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
7994		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
7995
7996		val = qv->first_rxq;
7997		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
7998		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
7999	}
8000}
8001
8002/* Initialize port HW */
8003static int mvpp2_port_init(struct mvpp2_port *port)
8004{
8005	struct device *dev = port->dev->dev.parent;
8006	struct mvpp2 *priv = port->priv;
8007	struct mvpp2_txq_pcpu *txq_pcpu;
8008	int queue, cpu, err;
8009
8010	/* Checks for hardware constraints */
8011	if (port->first_rxq + port->nrxqs >
8012	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
8013		return -EINVAL;
8014
8015	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
8016	    (port->ntxqs > MVPP2_MAX_TXQ))
8017		return -EINVAL;
8018
8019	/* Disable port */
8020	mvpp2_egress_disable(port);
8021	mvpp2_port_disable(port);
8022
8023	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
8024
8025	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
8026				  GFP_KERNEL);
8027	if (!port->txqs)
8028		return -ENOMEM;
8029
8030	/* Associate physical Tx queues with this port and initialize them.
8031	 * The mapping is predefined.
8032	 */
8033	for (queue = 0; queue < port->ntxqs; queue++) {
8034		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
8035		struct mvpp2_tx_queue *txq;
8036
8037		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
8038		if (!txq) {
8039			err = -ENOMEM;
8040			goto err_free_percpu;
8041		}
8042
8043		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
8044		if (!txq->pcpu) {
8045			err = -ENOMEM;
8046			goto err_free_percpu;
8047		}
8048
8049		txq->id = queue_phy_id;
8050		txq->log_id = queue;
8051		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
8052		for_each_present_cpu(cpu) {
8053			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
8054			txq_pcpu->cpu = cpu;
8055		}
8056
8057		port->txqs[queue] = txq;
8058	}
8059
8060	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
8061				  GFP_KERNEL);
8062	if (!port->rxqs) {
8063		err = -ENOMEM;
8064		goto err_free_percpu;
8065	}
8066
8067	/* Allocate and initialize Rx queue for this port */
8068	for (queue = 0; queue < port->nrxqs; queue++) {
8069		struct mvpp2_rx_queue *rxq;
8070
8071		/* Map physical Rx queue to port's logical Rx queue */
8072		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
8073		if (!rxq) {
8074			err = -ENOMEM;
8075			goto err_free_percpu;
8076		}
8077		/* Map this Rx queue to a physical queue */
8078		rxq->id = port->first_rxq + queue;
8079		rxq->port = port->id;
8080		rxq->logic_rxq = queue;
8081
8082		port->rxqs[queue] = rxq;
8083	}
8084
8085	mvpp2_rx_irqs_setup(port);
8086
8087	/* Create Rx descriptor rings */
8088	for (queue = 0; queue < port->nrxqs; queue++) {
8089		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
8090
8091		rxq->size = port->rx_ring_size;
8092		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
8093		rxq->time_coal = MVPP2_RX_COAL_USEC;
8094	}
8095
8096	mvpp2_ingress_disable(port);
8097
8098	/* Port default configuration */
8099	mvpp2_defaults_set(port);
8100
8101	/* Port's classifier configuration */
8102	mvpp2_cls_oversize_rxq_set(port);
8103	mvpp2_cls_port_config(port);
8104
8105	/* Provide an initial Rx packet size */
8106	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
8107
8108	/* Initialize buffer pools for software forwarding (swf) */
8109	err = mvpp2_swf_bm_pool_init(port);
8110	if (err)
8111		goto err_free_percpu;
8112
8113	return 0;
8114
8115err_free_percpu:
8116	for (queue = 0; queue < port->ntxqs; queue++) {
8117		if (!port->txqs[queue])
8118			continue;
8119		free_percpu(port->txqs[queue]->pcpu);
8120	}
8121	return err;
8122}
8123
8124/* Checks if the port DT description has the TX interrupts
8125 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
8126	 * they are available, but we need to keep support for old DTs.
8127 */
8128static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8129				   struct device_node *port_node)
8130{
8131	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8132			  "tx-cpu2", "tx-cpu3" };
8133	int ret, i;
8134
8135	if (priv->hw_version == MVPP21)
8136		return false;
8137
8138	for (i = 0; i < 5; i++) {
8139		ret = of_property_match_string(port_node, "interrupt-names",
8140					       irqs[i]);
8141		if (ret < 0)
8142			return false;
8143	}
8144
8145	return true;
8146}
8147
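/* Pick the port MAC address: firmware node first, then the address already
 * programmed in hardware (PPv2.1 only), otherwise a random one.
 */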
8148static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
8149				     struct fwnode_handle *fwnode,
8150				     char **mac_from)
8151{
8152	struct mvpp2_port *port = netdev_priv(dev);
8153	char hw_mac_addr[ETH_ALEN] = {0};
8154	char fw_mac_addr[ETH_ALEN];
8155
8156	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8157		*mac_from = "firmware node";
8158		ether_addr_copy(dev->dev_addr, fw_mac_addr);
8159		return;
8160	}
8161
8162	if (priv->hw_version == MVPP21) {
8163		mvpp21_get_mac_address(port, hw_mac_addr);
8164		if (is_valid_ether_addr(hw_mac_addr)) {
8165			*mac_from = "hardware";
8166			ether_addr_copy(dev->dev_addr, hw_mac_addr);
8167			return;
8168		}
8169	}
8170
8171	*mac_from = "random";
8172	eth_hw_addr_random(dev);
8173}
8174
8175/* Ports initialization */
8176static int mvpp2_port_probe(struct platform_device *pdev,
8177			    struct fwnode_handle *port_fwnode,
8178			    struct mvpp2 *priv)
8179{
8180	struct device_node *phy_node;
8181	struct phy *comphy = NULL;
8182	struct mvpp2_port *port;
8183	struct mvpp2_port_pcpu *port_pcpu;
8184	struct device_node *port_node = to_of_node(port_fwnode);
8185	struct net_device *dev;
8186	struct resource *res;
8187	char *mac_from = "";
8188	unsigned int ntxqs, nrxqs;
8189	bool has_tx_irqs;
8190	u32 id;
8191	int features;
8192	int phy_mode;
8193	int err, i, cpu;
8194
8195	if (port_node) {
8196		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
8197	} else {
8198		has_tx_irqs = true;
8199		queue_mode = MVPP2_QDIST_MULTI_MODE;
8200	}
8201
8202	if (!has_tx_irqs)
8203		queue_mode = MVPP2_QDIST_SINGLE_MODE;
8204
8205	ntxqs = MVPP2_MAX_TXQ;
8206	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
8207		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
8208	else
8209		nrxqs = MVPP2_DEFAULT_RXQ;
8210
8211	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
8212	if (!dev)
8213		return -ENOMEM;
8214
8215	if (port_node)
8216		phy_node = of_parse_phandle(port_node, "phy", 0);
8217	else
8218		phy_node = NULL;
8219
8220	phy_mode = fwnode_get_phy_mode(port_fwnode);
8221	if (phy_mode < 0) {
8222		dev_err(&pdev->dev, "incorrect phy mode\n");
8223		err = phy_mode;
8224		goto err_free_netdev;
8225	}
8226
8227	if (port_node) {
8228		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
8229		if (IS_ERR(comphy)) {
8230			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
8231				err = -EPROBE_DEFER;
8232				goto err_free_netdev;
8233			}
8234			comphy = NULL;
8235		}
8236	}
8237
8238	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
8239		err = -EINVAL;
8240		dev_err(&pdev->dev, "missing port-id value\n");
8241		goto err_free_netdev;
8242	}
8243
8244	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
8245	dev->watchdog_timeo = 5 * HZ;
8246	dev->netdev_ops = &mvpp2_netdev_ops;
8247	dev->ethtool_ops = &mvpp2_eth_tool_ops;
8248
8249	port = netdev_priv(dev);
8250	port->dev = dev;
8251	port->fwnode = port_fwnode;
8252	port->ntxqs = ntxqs;
8253	port->nrxqs = nrxqs;
8254	port->priv = priv;
8255	port->has_tx_irqs = has_tx_irqs;
8256
8257	err = mvpp2_queue_vectors_init(port, port_node);
8258	if (err)
8259		goto err_free_netdev;
8260
8261	if (port_node)
8262		port->link_irq = of_irq_get_byname(port_node, "link");
8263	else
8264		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
8265	if (port->link_irq == -EPROBE_DEFER) {
8266		err = -EPROBE_DEFER;
8267		goto err_deinit_qvecs;
8268	}
8269	if (port->link_irq <= 0)
8270		/* the link irq is optional */
8271		port->link_irq = 0;
8272
8273	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
8274		port->flags |= MVPP2_F_LOOPBACK;
8275
8276	port->id = id;
8277	if (priv->hw_version == MVPP21)
8278		port->first_rxq = port->id * port->nrxqs;
8279	else
8280		port->first_rxq = port->id * priv->max_port_rxqs;
8281
8282	port->phy_node = phy_node;
8283	port->phy_interface = phy_mode;
8284	port->comphy = comphy;
8285
8286	if (priv->hw_version == MVPP21) {
8287		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
8288		port->base = devm_ioremap_resource(&pdev->dev, res);
8289		if (IS_ERR(port->base)) {
8290			err = PTR_ERR(port->base);
8291			goto err_free_irq;
8292		}
8293
8294		port->stats_base = port->priv->lms_base +
8295				   MVPP21_MIB_COUNTERS_OFFSET +
8296				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
8297	} else {
8298		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
8299					     &port->gop_id)) {
8300			err = -EINVAL;
8301			dev_err(&pdev->dev, "missing gop-port-id value\n");
8302			goto err_deinit_qvecs;
8303		}
8304
8305		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
8306		port->stats_base = port->priv->iface_base +
8307				   MVPP22_MIB_COUNTERS_OFFSET +
8308				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
8309	}
8310
8311	/* Alloc per-cpu and ethtool stats */
8312	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
8313	if (!port->stats) {
8314		err = -ENOMEM;
8315		goto err_free_irq;
8316	}
8317
8318	port->ethtool_stats = devm_kcalloc(&pdev->dev,
8319					   ARRAY_SIZE(mvpp2_ethtool_regs),
8320					   sizeof(u64), GFP_KERNEL);
8321	if (!port->ethtool_stats) {
8322		err = -ENOMEM;
8323		goto err_free_stats;
8324	}
8325
8326	mutex_init(&port->gather_stats_lock);
8327	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
8328
8329	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
8330
8331	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
8332	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
8333	SET_NETDEV_DEV(dev, &pdev->dev);
8334
8335	err = mvpp2_port_init(port);
8336	if (err < 0) {
8337		dev_err(&pdev->dev, "failed to init port %d\n", id);
8338		goto err_free_stats;
8339	}
8340
8341	mvpp2_port_periodic_xon_disable(port);
8342
8343	if (priv->hw_version == MVPP21)
8344		mvpp2_port_fc_adv_enable(port);
8345
8346	mvpp2_port_reset(port);
8347
8348	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
8349	if (!port->pcpu) {
8350		err = -ENOMEM;
8351		goto err_free_txq_pcpu;
8352	}
8353
8354	if (!port->has_tx_irqs) {
8355		for_each_present_cpu(cpu) {
8356			port_pcpu = per_cpu_ptr(port->pcpu, cpu);
8357
8358			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
8359				     HRTIMER_MODE_REL_PINNED);
8360			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
8361			port_pcpu->timer_scheduled = false;
8362
8363			tasklet_init(&port_pcpu->tx_done_tasklet,
8364				     mvpp2_tx_proc_cb,
8365				     (unsigned long)dev);
8366		}
8367	}
8368
8369	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
8370		   NETIF_F_TSO;
8371	dev->features = features | NETIF_F_RXCSUM;
8372	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
8373			    NETIF_F_HW_VLAN_CTAG_FILTER;
8374
8375	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
8376		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8377		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8378	}
8379
8380	dev->vlan_features |= features;
8381	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
8382	dev->priv_flags |= IFF_UNICAST_FLT;
8383
8384	/* MTU range: 68 - 9704 */
8385	dev->min_mtu = ETH_MIN_MTU;
8386	/* 9704 == 9728 - 20 and rounding to 8 */
8387	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
8388
8389	err = register_netdev(dev);
8390	if (err < 0) {
8391		dev_err(&pdev->dev, "failed to register netdev\n");
8392		goto err_free_port_pcpu;
8393	}
8394	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
8395
8396	priv->port_list[priv->port_count++] = port;
8397
8398	return 0;
8399
8400err_free_port_pcpu:
8401	free_percpu(port->pcpu);
8402err_free_txq_pcpu:
8403	for (i = 0; i < port->ntxqs; i++)
8404		free_percpu(port->txqs[i]->pcpu);
8405err_free_stats:
8406	free_percpu(port->stats);
8407err_free_irq:
8408	if (port->link_irq)
8409		irq_dispose_mapping(port->link_irq);
8410err_deinit_qvecs:
8411	mvpp2_queue_vectors_deinit(port);
8412err_free_netdev:
8413	of_node_put(phy_node);
8414	free_netdev(dev);
8415	return err;
8416}
8417
8418/* Ports removal routine */
8419static void mvpp2_port_remove(struct mvpp2_port *port)
8420{
8421	int i;
8422
8423	unregister_netdev(port->dev);
8424	of_node_put(port->phy_node);
8425	free_percpu(port->pcpu);
8426	free_percpu(port->stats);
8427	for (i = 0; i < port->ntxqs; i++)
8428		free_percpu(port->txqs[i]->pcpu);
8429	mvpp2_queue_vectors_deinit(port);
8430	if (port->link_irq)
8431		irq_dispose_mapping(port->link_irq);
8432	free_netdev(port->dev);
8433}
8434
8435/* Initialize decoding windows */
8436static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8437				    struct mvpp2 *priv)
8438{
8439	u32 win_enable;
8440	int i;
8441
8442	for (i = 0; i < 6; i++) {
8443		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8444		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8445
8446		if (i < 4)
8447			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8448	}
8449
8450	win_enable = 0;
8451
8452	for (i = 0; i < dram->num_cs; i++) {
8453		const struct mbus_dram_window *cs = dram->cs + i;
8454
8455		mvpp2_write(priv, MVPP2_WIN_BASE(i),
8456			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8457			    dram->mbus_dram_target_id);
8458
8459		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8460			    (cs->size - 1) & 0xffff0000);
8461
8462		win_enable |= (1 << i);
8463	}
8464
8465	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8466}
8467
8468	/* Initialize Rx FIFOs */
8469static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8470{
8471	int port;
8472
8473	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8474		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8475			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8476		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8477			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8478	}
8479
8480	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8481		    MVPP2_RX_FIFO_PORT_MIN_PKT);
8482	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8483}
8484
8485static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8486{
8487	int port;
8488
8489	/* The FIFO size parameters are set depending on the maximum speed a
8490	 * given port can handle:
8491	 * - Port 0: 10Gbps
8492	 * - Port 1: 2.5Gbps
8493	 * - Ports 2 and 3: 1Gbps
8494	 */
8495
8496	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8497		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8498	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8499		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8500
8501	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8502		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8503	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8504		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8505
8506	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8507		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8508			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8509		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8510			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8511	}
8512
8513	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8514		    MVPP2_RX_FIFO_PORT_MIN_PKT);
8515	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8516}
8517
8518	/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2, and 10G
8519	 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
8520	 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
8521 */
8522static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8523{
8524	int port, size, thrs;
8525
8526	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8527		if (port == 0) {
8528			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
8529			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
8530		} else {
8531			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
8532			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
8533		}
8534		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
8535		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
8536	}
8537}
8538
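/* Configure the AXI read/write attributes (cache and domain codes) used for
 * BM, descriptor and packet data accesses.
 */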
8539static void mvpp2_axi_init(struct mvpp2 *priv)
8540{
8541	u32 val, rdval, wrval;
8542
8543	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
8544
8545	/* AXI Bridge Configuration */
8546
8547	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
8548		<< MVPP22_AXI_ATTR_CACHE_OFFS;
8549	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8550		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
8551
8552	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
8553		<< MVPP22_AXI_ATTR_CACHE_OFFS;
8554	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8555		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
8556
8557	/* BM */
8558	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
8559	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
8560
8561	/* Descriptors */
8562	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
8563	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
8564	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
8565	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
8566
8567	/* Buffer Data */
8568	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
8569	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
8570
8571	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
8572		<< MVPP22_AXI_CODE_CACHE_OFFS;
8573	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
8574		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
8575	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
8576	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
8577
8578	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
8579		<< MVPP22_AXI_CODE_CACHE_OFFS;
8580	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8581		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
8582
8583	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
8584
8585	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
8586		<< MVPP22_AXI_CODE_CACHE_OFFS;
8587	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8588		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
8589
8590	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
8591}
8592
8593/* Initialize network controller common part HW */
8594static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
8595{
8596	const struct mbus_dram_target_info *dram_target_info;
8597	int err, i;
8598	u32 val;
8599
8600	/* MBUS windows configuration */
8601	dram_target_info = mv_mbus_dram_info();
8602	if (dram_target_info)
8603		mvpp2_conf_mbus_windows(dram_target_info, priv);
8604
8605	if (priv->hw_version == MVPP22)
8606		mvpp2_axi_init(priv);
8607
8608	/* Disable HW PHY polling */
8609	if (priv->hw_version == MVPP21) {
8610		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8611		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
8612		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8613	} else {
8614		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8615		val &= ~MVPP22_SMI_POLLING_EN;
8616		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8617	}
8618
8619	/* Allocate and initialize aggregated TXQs */
8620	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
8621				       sizeof(*priv->aggr_txqs),
8622				       GFP_KERNEL);
8623	if (!priv->aggr_txqs)
8624		return -ENOMEM;
8625
8626	for_each_present_cpu(i) {
8627		priv->aggr_txqs[i].id = i;
8628		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
8629		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
8630		if (err < 0)
8631			return err;
8632	}
8633
8634	/* FIFO init */
8635	if (priv->hw_version == MVPP21) {
8636		mvpp2_rx_fifo_init(priv);
8637	} else {
8638		mvpp22_rx_fifo_init(priv);
8639		mvpp22_tx_fifo_init(priv);
8640	}
8641
8642	if (priv->hw_version == MVPP21)
8643		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
8644		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
8645
8646	/* Allow cache snoop when transmitting packets */
8647	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
8648
8649	/* Buffer Manager initialization */
8650	err = mvpp2_bm_init(pdev, priv);
8651	if (err < 0)
8652		return err;
8653
8654	/* Parser default initialization */
8655	err = mvpp2_prs_default_init(pdev, priv);
8656	if (err < 0)
8657		return err;
8658
8659	/* Classifier default initialization */
8660	mvpp2_cls_init(priv);
8661
8662	return 0;
8663}
8664
8665static int mvpp2_probe(struct platform_device *pdev)
8666{
8667	const struct acpi_device_id *acpi_id;
8668	struct fwnode_handle *fwnode = pdev->dev.fwnode;
8669	struct fwnode_handle *port_fwnode;
8670	struct mvpp2 *priv;
8671	struct resource *res;
8672	void __iomem *base;
8673	int i;
8674	int err;
8675
8676	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
8677	if (!priv)
8678		return -ENOMEM;
8679
8680	if (has_acpi_companion(&pdev->dev)) {
8681		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
8682					    &pdev->dev);
8683		priv->hw_version = (unsigned long)acpi_id->driver_data;
8684	} else {
8685		priv->hw_version =
8686			(unsigned long)of_device_get_match_data(&pdev->dev);
8687	}
8688
8689	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8690	base = devm_ioremap_resource(&pdev->dev, res);
8691	if (IS_ERR(base))
8692		return PTR_ERR(base);
8693
8694	if (priv->hw_version == MVPP21) {
8695		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8696		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
8697		if (IS_ERR(priv->lms_base))
8698			return PTR_ERR(priv->lms_base);
8699	} else {
8700		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8701		if (has_acpi_companion(&pdev->dev)) {
8702			/* In case the MDIO memory region is declared in
8703			 * the ACPI, it can already appear as 'in-use'
8704			 * in the OS. Because it is overlapped by the second
8705			 * region of the network controller, make sure it
8706			 * is released before requesting it again. The mvpp2
8707			 * driver takes care to avoid concurrent access to
8708			 * this memory region.
8709			 */
8710			release_resource(res);
8711		}
8712		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
8713		if (IS_ERR(priv->iface_base))
8714			return PTR_ERR(priv->iface_base);
8715	}
8716
8717	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
8718		priv->sysctrl_base =
8719			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
8720							"marvell,system-controller");
8721		if (IS_ERR(priv->sysctrl_base))
8722			/* The system controller regmap is optional for dt
8723			 * compatibility reasons. When not provided, the
8724			 * configuration of the GoP relies on the
8725			 * firmware/bootloader.
8726			 */
8727			priv->sysctrl_base = NULL;
8728	}
8729
8730	mvpp2_setup_bm_pool();
8731
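	/* Each software thread gets its own register window inside the base
	 * region (per the *_ADDR_SPACE_SZ constants; on PPv2.1 the window
	 * size is 0, so all CPUs effectively share the same registers).
	 */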
8732	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
8733		u32 addr_space_sz;
8734
8735		addr_space_sz = (priv->hw_version == MVPP21 ?
8736				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
8737		priv->swth_base[i] = base + i * addr_space_sz;
8738	}
8739
8740	if (priv->hw_version == MVPP21)
8741		priv->max_port_rxqs = 8;
8742	else
8743		priv->max_port_rxqs = 32;
8744
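	/* Clocks are only described in the DT case. mg_clk and mg_core_clk
	 * exist on PPv2.2 only; mg_core_clk and axi_clk are treated as
	 * optional so that older device trees keep probing.
	 */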
8745	if (dev_of_node(&pdev->dev)) {
8746		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
8747		if (IS_ERR(priv->pp_clk))
8748			return PTR_ERR(priv->pp_clk);
8749		err = clk_prepare_enable(priv->pp_clk);
8750		if (err < 0)
8751			return err;
8752
8753		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
8754		if (IS_ERR(priv->gop_clk)) {
8755			err = PTR_ERR(priv->gop_clk);
8756			goto err_pp_clk;
8757		}
8758		err = clk_prepare_enable(priv->gop_clk);
8759		if (err < 0)
8760			goto err_pp_clk;
8761
8762		if (priv->hw_version == MVPP22) {
8763			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
8764			if (IS_ERR(priv->mg_clk)) {
8765				err = PTR_ERR(priv->mg_clk);
8766				goto err_gop_clk;
8767			}
8768
8769			err = clk_prepare_enable(priv->mg_clk);
8770			if (err < 0)
8771				goto err_gop_clk;
8772
8773			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
8774			if (IS_ERR(priv->mg_core_clk)) {
8775				priv->mg_core_clk = NULL;
8776			} else {
8777				err = clk_prepare_enable(priv->mg_core_clk);
8778				if (err < 0)
8779					goto err_mg_clk;
8780			}
8781		}
8782
8783		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8784		if (IS_ERR(priv->axi_clk)) {
8785			err = PTR_ERR(priv->axi_clk);
8786			if (err == -EPROBE_DEFER)
8787				goto err_mg_core_clk;
8788			priv->axi_clk = NULL;
8789		} else {
8790			err = clk_prepare_enable(priv->axi_clk);
8791			if (err < 0)
8792				goto err_mg_core_clk;
8793		}
8794
8795		/* Get system's tclk rate */
8796		priv->tclk = clk_get_rate(priv->pp_clk);
8797	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
8798					    &priv->tclk)) {
8799		dev_err(&pdev->dev, "missing clock-frequency value\n");
8800		return -EINVAL;
8801	}
8802
8803	if (priv->hw_version == MVPP22) {
8804		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
8805		if (err)
8806			goto err_axi_clk;
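		/* The streaming DMA mask (MVPP2_DESC_DMA_MASK) covers the
		 * wider address range that PPv2.2 descriptors can hold;
		 * coherent allocations are restricted further below.
		 */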
8807		/* Sadly, the BM pools all share the same register to
8808		 * store the high 32 bits of their address. So they
8809		 * must all have the same high 32 bits, which forces
8810		 * us to restrict coherent memory to DMA_BIT_MASK(32).
8811		 */
8812		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8813		if (err)
8814			goto err_axi_clk;
8815	}
8816
8817	/* Initialize network controller */
8818	err = mvpp2_init(pdev, priv);
8819	if (err < 0) {
8820		dev_err(&pdev->dev, "failed to initialize controller\n");
8821		goto err_axi_clk;
8822	}
8823
8824	/* Initialize ports */
8825	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8826		err = mvpp2_port_probe(pdev, port_fwnode, priv);
8827		if (err < 0)
8828			goto err_port_probe;
8829	}
8830
8831	if (priv->port_count == 0) {
8832		dev_err(&pdev->dev, "no ports enabled\n");
8833		err = -ENODEV;
8834		goto err_axi_clk;
8835	}
8836
8837	/* Statistics must be gathered regularly because some of them (like
8838	 * packet counters) are 32-bit registers and could overflow quite
8839	 * quickly. For instance, a 10Gb link used at full bandwidth with the
8840	 * smallest packets (64B) will overflow a 32-bit counter in less than
8841	 * 30 seconds. Hence, use a workqueue to accumulate 64-bit counters.
8842	 */
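	/* Rough arithmetic, for illustration: 64B frames plus ~20B of
	 * preamble/IFG give ~14.9 Mpps at 10Gb/s, and the octet counters
	 * advance by roughly 1GB/s, so a 32-bit counter can wrap within a
	 * few seconds at line rate.
	 */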
8843	snprintf(priv->queue_name, sizeof(priv->queue_name),
8844		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
8845		 priv->port_count > 1 ? "+" : "");
8846	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
8847	if (!priv->stats_queue) {
8848		err = -ENOMEM;
8849		goto err_port_probe;
8850	}
8851
8852	platform_set_drvdata(pdev, priv);
8853	return 0;
8854
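/* Error paths unwind in reverse order of acquisition. The clk API tolerates
 * a NULL clk, so disabling the optional clocks (axi_clk, mg_core_clk) is
 * safe even when they were never provided.
 */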
8855err_port_probe:
8856	i = 0;
8857	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8858		if (priv->port_list[i])
8859			mvpp2_port_remove(priv->port_list[i]);
8860		i++;
8861	}
8862err_axi_clk:
8863	clk_disable_unprepare(priv->axi_clk);
8864
8865err_mg_core_clk:
8866	if (priv->hw_version == MVPP22)
8867		clk_disable_unprepare(priv->mg_core_clk);
8868err_mg_clk:
8869	if (priv->hw_version == MVPP22)
8870		clk_disable_unprepare(priv->mg_clk);
8871err_gop_clk:
8872	clk_disable_unprepare(priv->gop_clk);
8873err_pp_clk:
8874	clk_disable_unprepare(priv->pp_clk);
8875	return err;
8876}
8877
8878static int mvpp2_remove(struct platform_device *pdev)
8879{
8880	struct mvpp2 *priv = platform_get_drvdata(pdev);
8881	struct fwnode_handle *fwnode = pdev->dev.fwnode;
8882	struct fwnode_handle *port_fwnode;
8883	int i = 0;
8884
8885	flush_workqueue(priv->stats_queue);
8886	destroy_workqueue(priv->stats_queue);
8887
8888	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8889		if (priv->port_list[i]) {
8890			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
8891			mvpp2_port_remove(priv->port_list[i]);
8892		}
8893		i++;
8894	}
8895
8896	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8897		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8898
8899		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8900	}
8901
8902	for_each_present_cpu(i) {
8903		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8904
8905		dma_free_coherent(&pdev->dev,
8906				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8907				  aggr_txq->descs,
8908				  aggr_txq->descs_dma);
8909	}
8910
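	/* Clocks were only acquired when the device was described via DT;
	 * in the ACPI case there is nothing to disable, so bail out here.
	 */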
8911	if (is_acpi_node(port_fwnode))
8912		return 0;
8913
8914	clk_disable_unprepare(priv->axi_clk);
8915	clk_disable_unprepare(priv->mg_core_clk);
8916	clk_disable_unprepare(priv->mg_clk);
8917	clk_disable_unprepare(priv->pp_clk);
8918	clk_disable_unprepare(priv->gop_clk);
8919
8920	return 0;
8921}
8922
8923static const struct of_device_id mvpp2_match[] = {
8924	{
8925		.compatible = "marvell,armada-375-pp2",
8926		.data = (void *)MVPP21,
8927	},
8928	{
8929		.compatible = "marvell,armada-7k-pp22",
8930		.data = (void *)MVPP22,
8931	},
8932	{ }
8933};
8934MODULE_DEVICE_TABLE(of, mvpp2_match);
8935
8936static const struct acpi_device_id mvpp2_acpi_match[] = {
8937	{ "MRVL0110", MVPP22 },
8938	{ },
8939};
8940MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
8941
8942static struct platform_driver mvpp2_driver = {
8943	.probe = mvpp2_probe,
8944	.remove = mvpp2_remove,
8945	.driver = {
8946		.name = MVPP2_DRIVER_NAME,
8947		.of_match_table = mvpp2_match,
8948		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
8949	},
8950};
8951
8952module_platform_driver(mvpp2_driver);
8953
8954MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
8955MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
8956MODULE_LICENSE("GPL v2");