// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"

#define CPTS_N_ETX_TS 4

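/* Set up by the front-end drivers (legacy cpsw.c or switchdev cpsw_new.c),
 * each of which maps a netdev priv to its slave index in its own way.
 */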
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp	*xmeta;
	struct xdp_frame	*xdpf;
	struct net_device	*ndev;
	struct netdev_queue	*txq;
	struct sk_buff		*skb;
	int			ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* If the queue was stopped because the TX DMA ran out of
	 * descriptors, wake it now that descriptors have been freed.
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
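	/* re-arm the misc interrupt; 0x10 is presumably the CPTS event
	 * enable bit, since only CPTS work was handled above
	 */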
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}

int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	int			num_tx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
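	/* the state bitmap is scanned MSB-first; each set bit marks a
	 * channel with pending completions, and the per-channel budget
	 * computed by cpsw_split_res() caps its share of the NAPI budget
	 */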
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	int			num_rx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

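	/* The hardware prepends one 32-bit word to the packet carrying the
	 * received VID, priority and packet type; the field positions come
	 * from the CPSW_RX_VLAN_ENCAP_HDR_* definitions in cpsw.h.
	 */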
	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max
		 * speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

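	/* Worked example with illustrative numbers: a 1000 Mbps link
	 * (max_rate = 1000000 kbps), four TX channels of which two are rate
	 * limited to 100000 and 200000 kbps (consumed_rate = 300000), and
	 * NAPI_POLL_WEIGHT = 64. Each unlimited channel then gets
	 * ch_budget = (64 - 300000 * 64 / 1000000) / 2 = (64 - 19) / 2 = 22,
	 * and bigest_rate = (1000000 - 300000) / 2 = 350000 kbps. The loop
	 * below gives the rate-limited channels budgets of 6 and 12, and the
	 * leftover (64 - 22 - 22 - 6 - 12 = 2) goes to the biggest-rate
	 * channel.
	 */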
	/* split tx weight/budget */
	budget = NAPI_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = NAPI_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem		*regs = cpsw->regs;

		slave->slave_num = i;
		slave->data	= &cpsw->data.slave_data[i];
		slave->regs	= regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev			= dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
	ale_params.dev_id		= "cpsw";
	ale_params.bus_freq		= cpsw->bus_freq_mhz * 1000000;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev		= dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
	dma_params.descs_pool_size	= descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}

#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

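	/* 30 is the byte offset of the sequenceId field within the PTPv2
	 * header; ETH_P_1588 sets the LTYPE to match L2 PTP frames
	 */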
	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/

int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;

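	/* a PHY with hardware timestamping takes precedence: the requests
	 * then fall through to phy_mii_ioctl() instead of the MAC-side CPTS
	 */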
	if (!phy_has_hwtstamp(phy)) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return cpsw_hwtstamp_set(dev, req);
		case SIOCGHWTSTAMP:
			return cpsw_hwtstamp_get(dev, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}

int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

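	/* ndo_set_tx_maxrate passes the rate in Mbit/s; cpdma expects kbit/s */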
	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if (ch_rate < min_rate && ch_rate) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}

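/* Map a traffic class to a FIFO shaper index: the last tc maps to FIFO 0,
 * which cannot be shaped; the remaining tcs map to shaped FIFOs in reverse
 * order. With num_tc == 4 and CPSW_FIFO_SHAPERS_NUM == 3 (the value implied
 * by cpsw_priv.h) this gives tc0->3, tc1->2, tc2->1, tc3->0.
 */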
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for the highest FIFOs contiguously,
	 * and a FIFO's bandwidth may not exceed what the interface allows
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

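		/* SEND_PERCENT packs one percentage byte per shaped FIFO
		 * (FIFO i at bits [8 * (i - 1) .. 8 * i - 1]). bw is in
		 * kbit/s and shp_cfg_speed in Mbit/s, so the DIV_ROUND_UP()
		 * below yields the bandwidth as a percentage of the link
		 * rate, e.g. 250000 kbit/s on a 1000 Mbit/s link -> 25.
		 */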
		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* Enable channels in reverse order, as the highest FIFOs must be
	 * rate limited first, for consistency with the CPDMA rate-limited
	 * channels, which are also used in reverse order. FIFO0 cannot be
	 * rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can only be configured when the link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

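	/* qopt->idleslope is in kbit/s (tc-cbs convention), the same unit
	 * cpsw_set_fifo_bw() works in
	 */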
	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (num_tc) {
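		/* TX_PRI_MAP holds a 4-bit FIFO number for each of the
		 * eight packet priorities
		 */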
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);

int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		      void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	case TC_SETUP_BLOCK:
		return cpsw_qos_setup_tc_block(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

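			/* each page carries its cpsw_meta_xdp at
			 * CPSW_XMETA_OFFSET; the buffer handed to the DMA
			 * starts CPSW_HEADROOM_NA bytes in, leaving room for
			 * the metadata and XDP headroom
			 */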
			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params = {};
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

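	/* DMA_BIDIRECTIONAL because the same pool pages back both RX
	 * buffers and XDP_TX transmissions
	 */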
	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* the same page pool may be shared by both ndevs, since
		 * their rx handlers never run simultaneously
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!priv->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(priv->xdp_prog, prog);

	xdp_attachment_setup(&priv->xdpi, bpf);

	return 0;
}

int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return cpsw_xdp_prog_setup(priv, bpf);

	default:
		return -EINVAL;
	}
}

int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

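	/* Pages recycled from the RX pool are already DMA mapped: the frame
	 * sits inside the page after its headroom and the xdp_frame struct.
	 * Frames without a backing page (presumably from ndo_xdp_xmit) are
	 * submitted by virtual address instead.
	 */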
	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom)
			return -EINVAL;

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret)
		priv->ndev->stats.tx_dropped++;

	return ret;
}

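/* Run the attached XDP program on a received buffer. Returns CPSW_XDP_PASS
 * when the caller should build an skb and pass it up the stack, or
 * CPSW_XDP_CONSUMED when the buffer was transmitted, redirected or dropped
 * here.
 */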
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware share a common queue, so the incoming
		 * device may change per packet.
		 */
		xdp_do_flush();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}

static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
					  struct netlink_ext_ack *extack,
					  struct flow_cls_offload *cls,
					  u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
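	/* matching dst against 01:00:00:00:00:00 with the same mask tests
	 * exactly the multicast bit of the first octet
	 */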
	struct flow_match_eth_addrs match;
	u32 port_id;
	int ret;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_bc_ratelimit.cookie = cls->cookie;
		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_mc_ratelimit.cookie = cls->cookie;
		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
					       const struct flow_action_entry *act,
					       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
							      act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
		priv->ale_bc_ratelimit.cookie = 0;
		priv->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
	}

	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
		priv->ale_mc_ratelimit.cookie = 0;
		priv->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
	}

	return 0;
}

static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cpsw_qos_configure_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return cpsw_qos_delete_clsflower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct cpsw_priv *priv = cb_priv;
	int ret;

	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
		return -EOPNOTSUPP;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	pm_runtime_put(priv->dev);
	return ret;
}

static LIST_HEAD(cpsw_qos_block_cb_list);

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
					  cpsw_qos_setup_tc_block_cb,
					  priv, priv, true);
}

void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (priv->ale_bc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
					 priv->ale_bc_ratelimit.rate_packet_ps);

	if (priv->ale_mc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
					 priv->ale_mc_ratelimit.rate_packet_ps);
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Texas Instruments Ethernet Switch Driver
   4 *
   5 * Copyright (C) 2019 Texas Instruments
   6 */
   7
   8#include <linux/bpf.h>
   9#include <linux/bpf_trace.h>
  10#include <linux/if_ether.h>
  11#include <linux/if_vlan.h>
  12#include <linux/kmemleak.h>
  13#include <linux/module.h>
  14#include <linux/netdevice.h>
  15#include <linux/net_tstamp.h>
  16#include <linux/of.h>
  17#include <linux/phy.h>
  18#include <linux/platform_device.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/skbuff.h>
  21#include <net/page_pool.h>
  22#include <net/pkt_cls.h>
 
  23
  24#include "cpsw.h"
  25#include "cpts.h"
  26#include "cpsw_ale.h"
  27#include "cpsw_priv.h"
  28#include "cpsw_sl.h"
  29#include "davinci_cpdma.h"
  30
  31#define CPTS_N_ETX_TS 4
  32
  33int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
  34
  35void cpsw_intr_enable(struct cpsw_common *cpsw)
  36{
  37	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
  38	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
  39
  40	cpdma_ctlr_int_ctrl(cpsw->dma, true);
  41}
  42
  43void cpsw_intr_disable(struct cpsw_common *cpsw)
  44{
  45	writel_relaxed(0, &cpsw->wr_regs->tx_en);
  46	writel_relaxed(0, &cpsw->wr_regs->rx_en);
  47
  48	cpdma_ctlr_int_ctrl(cpsw->dma, false);
  49}
  50
  51void cpsw_tx_handler(void *token, int len, int status)
  52{
  53	struct cpsw_meta_xdp	*xmeta;
  54	struct xdp_frame	*xdpf;
  55	struct net_device	*ndev;
  56	struct netdev_queue	*txq;
  57	struct sk_buff		*skb;
  58	int			ch;
  59
  60	if (cpsw_is_xdpf_handle(token)) {
  61		xdpf = cpsw_handle_to_xdpf(token);
  62		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
  63		ndev = xmeta->ndev;
  64		ch = xmeta->ch;
  65		xdp_return_frame(xdpf);
  66	} else {
  67		skb = token;
  68		ndev = skb->dev;
  69		ch = skb_get_queue_mapping(skb);
  70		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
  71		dev_kfree_skb_any(skb);
  72	}
  73
  74	/* Check whether the queue is stopped due to stalled tx dma, if the
  75	 * queue is stopped then start the queue as we have free desc for tx
  76	 */
  77	txq = netdev_get_tx_queue(ndev, ch);
  78	if (unlikely(netif_tx_queue_stopped(txq)))
  79		netif_tx_wake_queue(txq);
  80
  81	ndev->stats.tx_packets++;
  82	ndev->stats.tx_bytes += len;
  83}
  84
  85irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
  86{
  87	struct cpsw_common *cpsw = dev_id;
  88
  89	writel(0, &cpsw->wr_regs->tx_en);
  90	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
  91
  92	if (cpsw->quirk_irq) {
  93		disable_irq_nosync(cpsw->irqs_table[1]);
  94		cpsw->tx_irq_disabled = true;
  95	}
  96
  97	napi_schedule(&cpsw->napi_tx);
  98	return IRQ_HANDLED;
  99}
 100
 101irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 102{
 103	struct cpsw_common *cpsw = dev_id;
 104
 105	writel(0, &cpsw->wr_regs->rx_en);
 106	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 107
 108	if (cpsw->quirk_irq) {
 109		disable_irq_nosync(cpsw->irqs_table[0]);
 110		cpsw->rx_irq_disabled = true;
 111	}
 112
 113	napi_schedule(&cpsw->napi_rx);
 114	return IRQ_HANDLED;
 115}
 116
 117irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
 118{
 119	struct cpsw_common *cpsw = dev_id;
 120
 121	writel(0, &cpsw->wr_regs->misc_en);
 122	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
 123	cpts_misc_interrupt(cpsw->cpts);
 124	writel(0x10, &cpsw->wr_regs->misc_en);
 125
 126	return IRQ_HANDLED;
 127}
 128
 129int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
 130{
 131	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
 132	int			num_tx, cur_budget, ch;
 133	u32			ch_map;
 134	struct cpsw_vector	*txv;
 135
 136	/* process every unprocessed channel */
 137	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
 138	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
 139		if (!(ch_map & 0x80))
 140			continue;
 141
 142		txv = &cpsw->txv[ch];
 143		if (unlikely(txv->budget > budget - num_tx))
 144			cur_budget = budget - num_tx;
 145		else
 146			cur_budget = txv->budget;
 147
 148		num_tx += cpdma_chan_process(txv->ch, cur_budget);
 149		if (num_tx >= budget)
 150			break;
 151	}
 152
 153	if (num_tx < budget) {
 154		napi_complete(napi_tx);
 155		writel(0xff, &cpsw->wr_regs->tx_en);
 156	}
 157
 158	return num_tx;
 159}
 160
 161int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 162{
 163	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
 164	int num_tx;
 165
 166	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
 167	if (num_tx < budget) {
 168		napi_complete(napi_tx);
 169		writel(0xff, &cpsw->wr_regs->tx_en);
 170		if (cpsw->tx_irq_disabled) {
 171			cpsw->tx_irq_disabled = false;
 172			enable_irq(cpsw->irqs_table[1]);
 173		}
 174	}
 175
 176	return num_tx;
 177}
 178
 179int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
 180{
 181	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
 182	int			num_rx, cur_budget, ch;
 183	u32			ch_map;
 184	struct cpsw_vector	*rxv;
 185
 186	/* process every unprocessed channel */
 187	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
 188	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
 189		if (!(ch_map & 0x01))
 190			continue;
 191
 192		rxv = &cpsw->rxv[ch];
 193		if (unlikely(rxv->budget > budget - num_rx))
 194			cur_budget = budget - num_rx;
 195		else
 196			cur_budget = rxv->budget;
 197
 198		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
 199		if (num_rx >= budget)
 200			break;
 201	}
 202
 203	if (num_rx < budget) {
 204		napi_complete_done(napi_rx, num_rx);
 205		writel(0xff, &cpsw->wr_regs->rx_en);
 206	}
 207
 208	return num_rx;
 209}
 210
 211int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
 212{
 213	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
 214	int num_rx;
 215
 216	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
 217	if (num_rx < budget) {
 218		napi_complete_done(napi_rx, num_rx);
 219		writel(0xff, &cpsw->wr_regs->rx_en);
 220		if (cpsw->rx_irq_disabled) {
 221			cpsw->rx_irq_disabled = false;
 222			enable_irq(cpsw->irqs_table[0]);
 223		}
 224	}
 225
 226	return num_rx;
 227}
 228
 229void cpsw_rx_vlan_encap(struct sk_buff *skb)
 230{
 231	struct cpsw_priv *priv = netdev_priv(skb->dev);
 232	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
 233	struct cpsw_common *cpsw = priv->cpsw;
 234	u16 vtag, vid, prio, pkt_type;
 235
 236	/* Remove VLAN header encapsulation word */
 237	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
 238
 239	pkt_type = (rx_vlan_encap_hdr >>
 240		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
 241		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
 242	/* Ignore unknown & Priority-tagged packets*/
 243	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
 244	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
 245		return;
 246
 247	vid = (rx_vlan_encap_hdr >>
 248	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
 249	       VLAN_VID_MASK;
 250	/* Ignore vid 0 and pass packet as is */
 251	if (!vid)
 252		return;
 253
 254	/* Untag P0 packets if set for vlan */
 255	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
 256		prio = (rx_vlan_encap_hdr >>
 257			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
 258			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
 259
 260		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
 261		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
 262	}
 263
 264	/* strip vlan tag for VLAN-tagged packet */
 265	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
 266		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
 267		skb_pull(skb, VLAN_HLEN);
 268	}
 269}
 270
 271void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
 272{
 273	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
 274	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
 275}
 276
 277void soft_reset(const char *module, void __iomem *reg)
 278{
 279	unsigned long timeout = jiffies + HZ;
 280
 281	writel_relaxed(1, reg);
 282	do {
 283		cpu_relax();
 284	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
 285
 286	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
 287}
 288
 289void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 290{
 291	struct cpsw_priv *priv = netdev_priv(ndev);
 292	struct cpsw_common *cpsw = priv->cpsw;
 293	int ch;
 294
 295	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
 296	ndev->stats.tx_errors++;
 297	cpsw_intr_disable(cpsw);
 298	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
 299		cpdma_chan_stop(cpsw->txv[ch].ch);
 300		cpdma_chan_start(cpsw->txv[ch].ch);
 301	}
 302
 303	cpsw_intr_enable(cpsw);
 304	netif_trans_update(ndev);
 305	netif_tx_wake_all_queues(ndev);
 306}
 307
 308static int cpsw_get_common_speed(struct cpsw_common *cpsw)
 309{
 310	int i, speed;
 311
 312	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
 313		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
 314			speed += cpsw->slaves[i].phy->speed;
 315
 316	return speed;
 317}
 318
 319int cpsw_need_resplit(struct cpsw_common *cpsw)
 320{
 321	int i, rlim_ch_num;
 322	int speed, ch_rate;
 323
 324	/* re-split resources only in case speed was changed */
 325	speed = cpsw_get_common_speed(cpsw);
 326	if (speed == cpsw->speed || !speed)
 327		return 0;
 328
 329	cpsw->speed = speed;
 330
 331	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
 332		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
 333		if (!ch_rate)
 334			break;
 335
 336		rlim_ch_num++;
 337	}
 338
 339	/* cases not dependent on speed */
 340	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
 341		return 0;
 342
 343	return 1;
 344}
 345
 346void cpsw_split_res(struct cpsw_common *cpsw)
 347{
 348	u32 consumed_rate = 0, bigest_rate = 0;
 349	struct cpsw_vector *txv = cpsw->txv;
 350	int i, ch_weight, rlim_ch_num = 0;
 351	int budget, bigest_rate_ch = 0;
 352	u32 ch_rate, max_rate;
 353	int ch_budget = 0;
 354
 355	for (i = 0; i < cpsw->tx_ch_num; i++) {
 356		ch_rate = cpdma_chan_get_rate(txv[i].ch);
 357		if (!ch_rate)
 358			continue;
 359
 360		rlim_ch_num++;
 361		consumed_rate += ch_rate;
 362	}
 363
 364	if (cpsw->tx_ch_num == rlim_ch_num) {
 365		max_rate = consumed_rate;
 366	} else if (!rlim_ch_num) {
 367		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
 368		bigest_rate = 0;
 369		max_rate = consumed_rate;
 370	} else {
 371		max_rate = cpsw->speed * 1000;
 372
 373		/* if max_rate is less then expected due to reduced link speed,
 374		 * split proportionally according next potential max speed
 375		 */
 376		if (max_rate < consumed_rate)
 377			max_rate *= 10;
 378
 379		if (max_rate < consumed_rate)
 380			max_rate *= 10;
 381
 382		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
 383		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
 384			    (cpsw->tx_ch_num - rlim_ch_num);
 385		bigest_rate = (max_rate - consumed_rate) /
 386			      (cpsw->tx_ch_num - rlim_ch_num);
 387	}
 388
 389	/* split tx weight/budget */
 390	budget = NAPI_POLL_WEIGHT;
 391	for (i = 0; i < cpsw->tx_ch_num; i++) {
 392		ch_rate = cpdma_chan_get_rate(txv[i].ch);
 393		if (ch_rate) {
 394			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
 395			if (!txv[i].budget)
 396				txv[i].budget++;
 397			if (ch_rate > bigest_rate) {
 398				bigest_rate_ch = i;
 399				bigest_rate = ch_rate;
 400			}
 401
 402			ch_weight = (ch_rate * 100) / max_rate;
 403			if (!ch_weight)
 404				ch_weight++;
 405			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
 406		} else {
 407			txv[i].budget = ch_budget;
 408			if (!biggest_rate_ch)
 409				biggest_rate_ch = i;
 410			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
 411		}
 412
 413		budget -= txv[i].budget;
 414	}
 415
 416	if (budget)
 417		txv[biggest_rate_ch].budget += budget;
 418
 419	/* split rx budget */
 420	budget = NAPI_POLL_WEIGHT;
 421	ch_budget = budget / cpsw->rx_ch_num;
 422	for (i = 0; i < cpsw->rx_ch_num; i++) {
 423		cpsw->rxv[i].budget = ch_budget;
 424		budget -= ch_budget;
 425	}
 426
 427	if (budget)
 428		cpsw->rxv[0].budget += budget;
 429}
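/* Worked example (illustrative numbers): assume NAPI_POLL_WEIGHT == 64,
 * four TX channels, channels 0 and 1 rate-limited to 100000 kbit/s each,
 * and cpsw->speed == 1000, so max_rate = 1000000 kbit/s:
 *
 *	consumed_rate = 200000, rlim_ch_num = 2
 *	ch_budget (unlimited chs) = (64 - 200000 * 64 / 1000000) / 2 = 26
 *	budget of ch0/ch1 = 100000 * 64 / 1000000 = 6 each, weight = 10%
 *	budget of ch2/ch3 = 26 each, weight 0 (CPDMA round-robin default)
 *
 * 6 + 6 + 26 + 26 = 64, so no leftover remains; any rounding leftover
 * would be added to the biggest-rate channel. The RX budget is simply
 * NAPI_POLL_WEIGHT split evenly, with the remainder going to channel 0.
 */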
 430
 431int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
 432		     int ale_ageout, phys_addr_t desc_mem_phys,
 433		     int descs_pool_size)
 434{
 435	u32 slave_offset, sliver_offset, slave_size;
 436	struct cpsw_ale_params ale_params;
 437	struct cpsw_platform_data *data;
 438	struct cpdma_params dma_params;
 439	struct device *dev = cpsw->dev;
 440	struct device_node *cpts_node;
 441	void __iomem *cpts_regs;
 442	int ret = 0, i;
 443
 444	data = &cpsw->data;
 445	cpsw->rx_ch_num = 1;
 446	cpsw->tx_ch_num = 1;
 447
 448	cpsw->version = readl(&cpsw->regs->id_ver);
 449
 450	memset(&dma_params, 0, sizeof(dma_params));
 451	memset(&ale_params, 0, sizeof(ale_params));
 452
 453	switch (cpsw->version) {
 454	case CPSW_VERSION_1:
 455		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
 456		cpts_regs	     = ss_regs + CPSW1_CPTS_OFFSET;
 457		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
 458		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
 459		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
 460		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
 461		slave_offset         = CPSW1_SLAVE_OFFSET;
 462		slave_size           = CPSW1_SLAVE_SIZE;
 463		sliver_offset        = CPSW1_SLIVER_OFFSET;
 464		dma_params.desc_mem_phys = 0;
 465		break;
 466	case CPSW_VERSION_2:
 467	case CPSW_VERSION_3:
 468	case CPSW_VERSION_4:
 469		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
 470		cpts_regs	     = ss_regs + CPSW2_CPTS_OFFSET;
 471		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
 472		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
 473		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
 474		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
 475		slave_offset         = CPSW2_SLAVE_OFFSET;
 476		slave_size           = CPSW2_SLAVE_SIZE;
 477		sliver_offset        = CPSW2_SLIVER_OFFSET;
 478		dma_params.desc_mem_phys = desc_mem_phys;
 479		break;
 480	default:
 481		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
 482		return -ENODEV;
 483	}
 484
 485	for (i = 0; i < cpsw->data.slaves; i++) {
 486		struct cpsw_slave *slave = &cpsw->slaves[i];
 487		void __iomem		*regs = cpsw->regs;
 488
 489		slave->slave_num = i;
 490		slave->data	= &cpsw->data.slave_data[i];
 491		slave->regs	= regs + slave_offset;
 492		slave->port_vlan = slave->data->dual_emac_res_vlan;
 493		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
 494		if (IS_ERR(slave->mac_sl))
 495			return PTR_ERR(slave->mac_sl);
 496
 497		slave_offset  += slave_size;
 498		sliver_offset += SLIVER_SIZE;
 499	}
 500
 501	ale_params.dev			= dev;
 502	ale_params.ale_ageout		= ale_ageout;
 503	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
 504	ale_params.dev_id		= "cpsw";
 505	ale_params.bus_freq		= cpsw->bus_freq_mhz * 1000000;
 506
 507	cpsw->ale = cpsw_ale_create(&ale_params);
 508	if (IS_ERR(cpsw->ale)) {
 509		dev_err(dev, "error initializing ale engine\n");
 510		return PTR_ERR(cpsw->ale);
 511	}
 512
 513	dma_params.dev		= dev;
 514	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
 515	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
 516	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
 517	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
 518	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
 519
 520	dma_params.num_chan		= data->channels;
 521	dma_params.has_soft_reset	= true;
 522	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
 523	dma_params.desc_mem_size	= data->bd_ram_size;
 524	dma_params.desc_align		= 16;
 525	dma_params.has_ext_regs		= true;
 526	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
 527	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
 528	dma_params.descs_pool_size	= descs_pool_size;
 529
 530	cpsw->dma = cpdma_ctlr_create(&dma_params);
 531	if (!cpsw->dma) {
 532		dev_err(dev, "error initializing dma\n");
 533		return -ENOMEM;
 534	}
 535
 536	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
 537	if (!cpts_node)
 538		cpts_node = cpsw->dev->of_node;
 539
 540	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
 541				 CPTS_N_ETX_TS);
 542	if (IS_ERR(cpsw->cpts)) {
 543		ret = PTR_ERR(cpsw->cpts);
 544		cpdma_ctlr_destroy(cpsw->dma);
 545	}
 546	of_node_put(cpts_node);
 547
 548	return ret;
 549}
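/* Bring-up order above, for reference: probe the IP version from ID_VER,
 * derive per-version register offsets, set up each slave's sliver (MAC)
 * block, then create the ALE, the CPDMA controller and finally CPTS. A
 * CPTS failure tears the CPDMA controller back down; the sliver and ALE
 * handles are device-managed, so earlier failures simply return.
 */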
 550
 551#if IS_ENABLED(CONFIG_TI_CPTS)
 552
 553static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 554{
 555	struct cpsw_common *cpsw = priv->cpsw;
 556	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 557	u32 ts_en, seq_id;
 558
 559	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
 560		slave_write(slave, 0, CPSW1_TS_CTL);
 561		return;
 562	}
 563
 564	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
 565	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
 566
 567	if (priv->tx_ts_enabled)
 568		ts_en |= CPSW_V1_TS_TX_EN;
 569
 570	if (priv->rx_ts_enabled)
 571		ts_en |= CPSW_V1_TS_RX_EN;
 572
 573	slave_write(slave, ts_en, CPSW1_TS_CTL);
 574	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
 575}
 576
 577static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 578{
 579	struct cpsw_common *cpsw = priv->cpsw;
 580	struct cpsw_slave *slave;
 581	u32 ctrl, mtype;
 582
 583	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 584
 585	ctrl = slave_read(slave, CPSW2_CONTROL);
 586	switch (cpsw->version) {
 587	case CPSW_VERSION_2:
 588		ctrl &= ~CTRL_V2_ALL_TS_MASK;
 589
 590		if (priv->tx_ts_enabled)
 591			ctrl |= CTRL_V2_TX_TS_BITS;
 592
 593		if (priv->rx_ts_enabled)
 594			ctrl |= CTRL_V2_RX_TS_BITS;
 595		break;
 596	case CPSW_VERSION_3:
 597	default:
 598		ctrl &= ~CTRL_V3_ALL_TS_MASK;
 599
 600		if (priv->tx_ts_enabled)
 601			ctrl |= CTRL_V3_TX_TS_BITS;
 602
 603		if (priv->rx_ts_enabled)
 604			ctrl |= CTRL_V3_RX_TS_BITS;
 605		break;
 606	}
 607
 608	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
 609
 610	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
 611	slave_write(slave, ctrl, CPSW2_CONTROL);
 612	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
 613	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
 614}
 615
 616static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 617{
 618	struct cpsw_priv *priv = netdev_priv(dev);
 619	struct cpsw_common *cpsw = priv->cpsw;
 620	struct hwtstamp_config cfg;
 621
 622	if (cpsw->version != CPSW_VERSION_1 &&
 623	    cpsw->version != CPSW_VERSION_2 &&
 624	    cpsw->version != CPSW_VERSION_3)
 625		return -EOPNOTSUPP;
 626
 627	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 628		return -EFAULT;
 629
 630	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
 631		return -ERANGE;
 632
 633	switch (cfg.rx_filter) {
 634	case HWTSTAMP_FILTER_NONE:
 635		priv->rx_ts_enabled = 0;
 636		break;
 637	case HWTSTAMP_FILTER_ALL:
 638	case HWTSTAMP_FILTER_NTP_ALL:
 639	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 640	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 641	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 642		return -ERANGE;
 643	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 644	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 645	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 646	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 647	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 648	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 649	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 650	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 651	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 652		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
 653		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 654		break;
 655	default:
 656		return -ERANGE;
 657	}
 658
 659	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
 660
 661	switch (cpsw->version) {
 662	case CPSW_VERSION_1:
 663		cpsw_hwtstamp_v1(priv);
 664		break;
 665	case CPSW_VERSION_2:
 666	case CPSW_VERSION_3:
 667		cpsw_hwtstamp_v2(priv);
 668		break;
 669	default:
 670		WARN_ON(1);
 671	}
 672
 673	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 674}
 675
 676static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 677{
 678	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
 679	struct cpsw_priv *priv = netdev_priv(dev);
 680	struct hwtstamp_config cfg;
 681
 682	if (cpsw->version != CPSW_VERSION_1 &&
 683	    cpsw->version != CPSW_VERSION_2 &&
 684	    cpsw->version != CPSW_VERSION_3)
 685		return -EOPNOTSUPP;
 686
 687	cfg.flags = 0;
 688	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 689	cfg.rx_filter = priv->rx_ts_enabled;
 690
 691	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 692}
 693#else
 694static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 695{
 696	return -EOPNOTSUPP;
 697}
 698
 699static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 700{
 701	return -EOPNOTSUPP;
 702}
 703#endif /*CONFIG_TI_CPTS*/
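/* Illustrative user-space sequence for the hwtstamp ioctls handled above
 * (standard kernel timestamping API, error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter holds the filter actually applied; this driver
 * widens every accepted V2 filter to HWTSTAMP_FILTER_PTP_V2_EVENT.
 */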
 704
 705int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 706{
 707	struct cpsw_priv *priv = netdev_priv(dev);
 708	struct cpsw_common *cpsw = priv->cpsw;
 709	int slave_no = cpsw_slave_index(cpsw, priv);
 710	struct phy_device *phy;
 711
 712	if (!netif_running(dev))
 713		return -EINVAL;
 714
 715	phy = cpsw->slaves[slave_no].phy;
 716
 717	if (!phy_has_hwtstamp(phy)) {
 718		switch (cmd) {
 719		case SIOCSHWTSTAMP:
 720			return cpsw_hwtstamp_set(dev, req);
 721		case SIOCGHWTSTAMP:
 722			return cpsw_hwtstamp_get(dev, req);
 723		}
 724	}
 725
 726	if (phy)
 727		return phy_mii_ioctl(phy, req, cmd);
 728
 729	return -EOPNOTSUPP;
 730}
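/* Note the dispatch order above: when the attached PHY implements hardware
 * timestamping (phy_has_hwtstamp()), SIOC[GS]HWTSTAMP falls through to
 * phy_mii_ioctl() so the PHY's timestamps take precedence; only otherwise
 * are the CPSW/CPTS handlers above used.
 */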
 731
 732int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
 733{
 734	struct cpsw_priv *priv = netdev_priv(ndev);
 735	struct cpsw_common *cpsw = priv->cpsw;
 736	struct cpsw_slave *slave;
 737	u32 min_rate;
 738	u32 ch_rate;
 739	int i, ret;
 740
 741	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
 742	if (ch_rate == rate)
 743		return 0;
 744
 745	ch_rate = rate * 1000;
 746	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
 747	if (ch_rate < min_rate && ch_rate) {
 748		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
 749			min_rate);
 750		return -EINVAL;
 751	}
 752
 753	if (rate > cpsw->speed) {
 754		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
 755		return -EINVAL;
 756	}
 757
 758	ret = pm_runtime_resume_and_get(cpsw->dev);
 759	if (ret < 0)
 760		return ret;
 761
 762	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
 763	pm_runtime_put(cpsw->dev);
 764
 765	if (ret)
 766		return ret;
 767
 768	/* update rates for slaves tx queues */
 769	for (i = 0; i < cpsw->data.slaves; i++) {
 770		slave = &cpsw->slaves[i];
 771		if (!slave->ndev)
 772			continue;
 773
 774		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
 775	}
 776
 777	cpsw_split_res(cpsw);
 778	return ret;
 779}
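/* The per-queue rate set here comes from the standard sysfs attribute
 * (units are Mbit/s, 0 removes the limit), e.g.:
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * The rate is mirrored into every slave ndev sharing the queue and the
 * NAPI budgets are re-split afterwards.
 */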
 780
 781static int cpsw_tc_to_fifo(int tc, int num_tc)
 782{
 783	if (tc == num_tc - 1)
 784		return 0;
 785
 786	return CPSW_FIFO_SHAPERS_NUM - tc;
 787}
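/* Mapping example, assuming CPSW_FIFO_SHAPERS_NUM == 3: with num_tc == 4,
 * tc0 -> FIFO 3, tc1 -> FIFO 2, tc2 -> FIFO 1, and the last tc3 -> FIFO 0.
 * Higher-priority traffic classes land on the higher (shapeable) FIFOs,
 * while the last class always uses FIFO 0, which cannot be rate limited.
 */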
 788
 789bool cpsw_shp_is_off(struct cpsw_priv *priv)
 790{
 791	struct cpsw_common *cpsw = priv->cpsw;
 792	struct cpsw_slave *slave;
 793	u32 shift, mask, val;
 794
 795	val = readl_relaxed(&cpsw->regs->ptype);
 796
 797	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 798	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
 799	mask = 7 << shift;
 800	val = val & mask;
 801
 802	return !val;
 803}
 804
 805static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
 806{
 807	struct cpsw_common *cpsw = priv->cpsw;
 808	struct cpsw_slave *slave;
 809	u32 shift, mask, val;
 810
 811	val = readl_relaxed(&cpsw->regs->ptype);
 812
 813	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 814	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
 815	mask = (1 << --fifo) << shift;
 816	val = on ? val | mask : val & ~mask;
 817
 818	writel_relaxed(val, &cpsw->regs->ptype);
 819}
 820
 821static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
 822{
 823	struct cpsw_common *cpsw = priv->cpsw;
 824	u32 val = 0, send_pct, shift;
 825	struct cpsw_slave *slave;
 826	int pct = 0, i;
 827
 828	if (bw > priv->shp_cfg_speed * 1000)
 829		goto err;
 830
 831	/* shaping has to stay enabled for the highest fifos linearly,
 832	 * and fifo bw can be no more than the interface allows
 833	 */
 834	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 835	send_pct = slave_read(slave, SEND_PERCENT);
 836	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
 837		if (!bw) {
 838			if (i >= fifo || !priv->fifo_bw[i])
 839				continue;
 840
 841			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
 842			continue;
 843		}
 844
 845		if (!priv->fifo_bw[i] && i > fifo) {
 846			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
 847			return -EINVAL;
 848		}
 849
 850		shift = (i - 1) * 8;
 851		if (i == fifo) {
 852			send_pct &= ~(CPSW_PCT_MASK << shift);
 853			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
 854			if (!val)
 855				val = 1;
 856
 857			send_pct |= val << shift;
 858			pct += val;
 859			continue;
 860		}
 861
 862		if (priv->fifo_bw[i])
 863			pct += (send_pct >> shift) & CPSW_PCT_MASK;
 864	}
 865
 866	if (pct >= 100)
 867		goto err;
 868
 869	slave_write(slave, send_pct, SEND_PERCENT);
 870	priv->fifo_bw[fifo] = bw;
 871
 872	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
 873		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
 874
 875	return 0;
 876err:
 877	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
 878	return -EINVAL;
 879}
 880
 881static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
 882{
 883	struct cpsw_common *cpsw = priv->cpsw;
 884	struct cpsw_slave *slave;
 885	u32 tx_in_ctl_rg, val;
 886	int ret;
 887
 888	ret = cpsw_set_fifo_bw(priv, fifo, bw);
 889	if (ret)
 890		return ret;
 891
 892	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 893	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
 894		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
 895
 896	if (!bw)
 897		cpsw_fifo_shp_on(priv, fifo, bw);
 898
 899	val = slave_read(slave, tx_in_ctl_rg);
 900	if (cpsw_shp_is_off(priv)) {
 901		/* disable rate limiting on all FIFO queues */
 902		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
 903
 904		/* set type of FIFO queues to normal priority mode */
 905		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
 906
 907		/* set type of FIFO queues to be rate limited */
 908		if (bw)
 909			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
 910		else
 911			priv->shp_cfg_speed = 0;
 912	}
 913
 914	/* toggle a FIFO rate limited queue */
 915	if (bw)
 916		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
 917	else
 918		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
 919	slave_write(slave, val, tx_in_ctl_rg);
 920
 921	/* FIFO transmit shape enable */
 922	cpsw_fifo_shp_on(priv, fifo, bw);
 923	return 0;
 924}
 925
 926/* Defaults:
 927 * class A - prio 3
 928 * class B - prio 2
 929 * shaping for class A should be set first
 930 */
 931static int cpsw_set_cbs(struct net_device *ndev,
 932			struct tc_cbs_qopt_offload *qopt)
 933{
 934	struct cpsw_priv *priv = netdev_priv(ndev);
 935	struct cpsw_common *cpsw = priv->cpsw;
 936	struct cpsw_slave *slave;
 937	int prev_speed = 0;
 938	int tc, ret, fifo;
 939	u32 bw = 0;
 940
 941	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
 942
 943	/* enable channels in backward order, as the highest FIFOs must be rate
 944	 * limited first and for compliance with CPDMA rate limited channels
 945	 * that are also used in backward order. FIFO0 cannot be rate limited.
 946	 */
 947	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
 948	if (!fifo) {
 949		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
 950		return -EINVAL;
 951	}
 952
 953	/* do nothing, it's disabled anyway */
 954	if (!qopt->enable && !priv->fifo_bw[fifo])
 955		return 0;
 956
 957	/* shapers can be set if link speed is known */
 958	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 959	if (slave->phy && slave->phy->link) {
 960		if (priv->shp_cfg_speed &&
 961		    priv->shp_cfg_speed != slave->phy->speed)
 962			prev_speed = priv->shp_cfg_speed;
 963
 964		priv->shp_cfg_speed = slave->phy->speed;
 965	}
 966
 967	if (!priv->shp_cfg_speed) {
 968		dev_err(priv->dev, "Link speed is not known");
 969		return -EINVAL;
 970	}
 971
 972	ret = pm_runtime_resume_and_get(cpsw->dev);
 973	if (ret < 0)
 974		return ret;
 975
 976	bw = qopt->enable ? qopt->idleslope : 0;
 977	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
 978	if (ret) {
 979		priv->shp_cfg_speed = prev_speed;
 980		prev_speed = 0;
 981	}
 982
 983	if (bw && prev_speed)
 984		dev_warn(priv->dev,
 985			 "Speed was changed, CBS shaper speeds are changed!");
 986
 987	pm_runtime_put_sync(cpsw->dev);
 988	return ret;
 989}
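/* Illustrative configuration (example values only): the CBS shaper is
 * attached under an offloaded mqprio root, class A (highest priority)
 * first:
 *
 *	tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *	tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *		sendslope -980000 locredit -1438 hicredit 62 offload 1
 *
 * idleslope is in kbit/s and becomes the fifo bandwidth (bw) above when
 * the shaper is enabled.
 */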
 990
 991static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
 992{
 993	struct tc_mqprio_qopt_offload *mqprio = type_data;
 994	struct cpsw_priv *priv = netdev_priv(ndev);
 995	struct cpsw_common *cpsw = priv->cpsw;
 996	int fifo, num_tc, count, offset;
 997	struct cpsw_slave *slave;
 998	u32 tx_prio_map = 0;
 999	int i, tc, ret;
1000
1001	num_tc = mqprio->qopt.num_tc;
1002	if (num_tc > CPSW_TC_NUM)
1003		return -EINVAL;
1004
1005	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
1006		return -EINVAL;
1007
1008	ret = pm_runtime_resume_and_get(cpsw->dev);
1009	if (ret < 0)
1010		return ret;
1011
1012	if (num_tc) {
1013		for (i = 0; i < 8; i++) {
1014			tc = mqprio->qopt.prio_tc_map[i];
1015			fifo = cpsw_tc_to_fifo(tc, num_tc);
1016			tx_prio_map |= fifo << (4 * i);
1017		}
1018
1019		netdev_set_num_tc(ndev, num_tc);
1020		for (i = 0; i < num_tc; i++) {
1021			count = mqprio->qopt.count[i];
1022			offset = mqprio->qopt.offset[i];
1023			netdev_set_tc_queue(ndev, i, count, offset);
1024		}
1025	}
1026
1027	if (!mqprio->qopt.hw) {
1028		/* restore default configuration */
1029		netdev_reset_tc(ndev);
1030		tx_prio_map = TX_PRIORITY_MAPPING;
1031	}
1032
1033	priv->mqprio_hw = mqprio->qopt.hw;
1034
1035	offset = cpsw->version == CPSW_VERSION_1 ?
1036		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1037
1038	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1039	slave_write(slave, tx_prio_map, offset);
1040
1041	pm_runtime_put_sync(cpsw->dev);
1042
1043	return 0;
1044}
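/* Example: for an offloaded mqprio with num_tc == 3, priorities mapped to
 * tc0 go to FIFO 3, tc1 to FIFO 2, and the last tc2 to FIFO 0;
 * tx_prio_map packs one FIFO nibble per skb priority into the per-slave
 * TX_PRI_MAP register. With qopt.hw == 0, the default TX_PRIORITY_MAPPING
 * is restored instead.
 */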
1045
1046static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
1047
1048int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1049		      void *type_data)
1050{
1051	switch (type) {
1052	case TC_SETUP_QDISC_CBS:
1053		return cpsw_set_cbs(ndev, type_data);
1054
1055	case TC_SETUP_QDISC_MQPRIO:
1056		return cpsw_set_mqprio(ndev, type_data);
1057
1058	case TC_SETUP_BLOCK:
1059		return cpsw_qos_setup_tc_block(ndev, type_data);
1060
1061	default:
1062		return -EOPNOTSUPP;
1063	}
1064}
1065
1066void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1067{
1068	int fifo, bw;
1069
1070	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1071		bw = priv->fifo_bw[fifo];
1072		if (!bw)
1073			continue;
1074
1075		cpsw_set_fifo_rlimit(priv, fifo, bw);
1076	}
1077}
1078
1079void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1080{
1081	struct cpsw_common *cpsw = priv->cpsw;
1082	u32 tx_prio_map = 0;
1083	int i, tc, fifo;
1084	u32 tx_prio_rg;
1085
1086	if (!priv->mqprio_hw)
1087		return;
1088
1089	for (i = 0; i < 8; i++) {
1090		tc = netdev_get_prio_tc_map(priv->ndev, i);
1091		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1092		tx_prio_map |= fifo << (4 * i);
1093	}
1094
1095	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1096		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1097
1098	slave_write(slave, tx_prio_map, tx_prio_rg);
1099}
1100
1101int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1102{
1103	struct cpsw_common *cpsw = priv->cpsw;
1104	struct cpsw_meta_xdp *xmeta;
1105	struct page_pool *pool;
1106	struct page *page;
1107	int ch_buf_num;
1108	int ch, i, ret;
1109	dma_addr_t dma;
1110
1111	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1112		pool = cpsw->page_pool[ch];
1113		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1114		for (i = 0; i < ch_buf_num; i++) {
1115			page = page_pool_dev_alloc_pages(pool);
1116			if (!page) {
1117				cpsw_err(priv, ifup, "allocate rx page err\n");
1118				return -ENOMEM;
1119			}
1120
1121			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
1122			xmeta->ndev = priv->ndev;
1123			xmeta->ch = ch;
1124
1125			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
1126			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
1127							    page, dma,
1128							    cpsw->rx_packet_max,
1129							    0);
1130			if (ret < 0) {
1131				cpsw_err(priv, ifup,
1132					 "cannot submit page to channel %d rx, error %d\n",
1133					 ch, ret);
1134				page_pool_recycle_direct(pool, page);
1135				return ret;
1136			}
1137		}
1138
1139		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1140			  ch, ch_buf_num);
1141	}
1142
1143	return 0;
1144}
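/* RX buffer layout used above: pages are DMA-mapped by the pool itself; a
 * struct cpsw_meta_xdp (destination ndev and channel) is stashed at
 * CPSW_XMETA_OFFSET inside each page and the CPDMA descriptor points past
 * CPSW_HEADROOM_NA, leaving headroom so the XDP and skb paths can share
 * the same buffer.
 */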
1145
1146static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
1147					       int size)
1148{
1149	struct page_pool_params pp_params = {};
1150	struct page_pool *pool;
1151
1152	pp_params.order = 0;
1153	pp_params.flags = PP_FLAG_DMA_MAP;
1154	pp_params.pool_size = size;
1155	pp_params.nid = NUMA_NO_NODE;
1156	pp_params.dma_dir = DMA_BIDIRECTIONAL;
1157	pp_params.dev = cpsw->dev;
1158
1159	pool = page_pool_create(&pp_params);
1160	if (IS_ERR(pool))
1161		dev_err(cpsw->dev, "cannot create rx page pool\n");
1162
1163	return pool;
1164}
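/* PP_FLAG_DMA_MAP makes the pool map pages at allocation time, which is
 * why cpsw_fill_rx_channels() can use page_pool_get_dma_addr() directly;
 * DMA_BIDIRECTIONAL is required because the same pages back both RX and
 * XDP_TX.
 */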
1165
1166static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
1167{
1168	struct page_pool *pool;
1169	int ret = 0, pool_size;
1170
1171	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1172	pool = cpsw_create_page_pool(cpsw, pool_size);
1173	if (IS_ERR(pool))
1174		ret = PTR_ERR(pool);
1175	else
1176		cpsw->page_pool[ch] = pool;
1177
1178	return ret;
1179}
1180
1181static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
1182{
1183	struct cpsw_common *cpsw = priv->cpsw;
1184	struct xdp_rxq_info *rxq;
1185	struct page_pool *pool;
1186	int ret;
1187
1188	pool = cpsw->page_pool[ch];
1189	rxq = &priv->xdp_rxq[ch];
1190
1191	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
1192	if (ret)
1193		return ret;
1194
1195	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
1196	if (ret)
1197		xdp_rxq_info_unreg(rxq);
1198
1199	return ret;
1200}
1201
1202static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
1203{
1204	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
1205
1206	if (!xdp_rxq_info_is_reg(rxq))
1207		return;
1208
1209	xdp_rxq_info_unreg(rxq);
1210}
1211
1212void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
1213{
1214	struct net_device *ndev;
1215	int i, ch;
1216
1217	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1218		for (i = 0; i < cpsw->data.slaves; i++) {
1219			ndev = cpsw->slaves[i].ndev;
1220			if (!ndev)
1221				continue;
1222
1223			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
1224		}
1225
1226		page_pool_destroy(cpsw->page_pool[ch]);
1227		cpsw->page_pool[ch] = NULL;
1228	}
1229}
1230
1231int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
1232{
1233	struct net_device *ndev;
1234	int i, ch, ret;
1235
1236	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1237		ret = cpsw_create_rx_pool(cpsw, ch);
1238		if (ret)
1239			goto err_cleanup;
1240
1241		/* using the same page pool is allowed as there are no rx handlers
1242		 * running simultaneously for both ndevs
1243		 */
1244		for (i = 0; i < cpsw->data.slaves; i++) {
1245			ndev = cpsw->slaves[i].ndev;
1246			if (!ndev)
1247				continue;
1248
1249			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
1250			if (ret)
1251				goto err_cleanup;
1252		}
1253	}
1254
1255	return 0;
1256
1257err_cleanup:
1258	cpsw_destroy_xdp_rxqs(cpsw);
1259
1260	return ret;
1261}
1262
1263static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
1264{
1265	struct bpf_prog *prog = bpf->prog;
1266
1267	if (!priv->xdpi.prog && !prog)
1268		return 0;
1269
1270	WRITE_ONCE(priv->xdp_prog, prog);
1271
1272	xdp_attachment_setup(&priv->xdpi, bpf);
1273
1274	return 0;
1275}
1276
1277int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1278{
1279	struct cpsw_priv *priv = netdev_priv(ndev);
1280
1281	switch (bpf->command) {
1282	case XDP_SETUP_PROG:
1283		return cpsw_xdp_prog_setup(priv, bpf);
1284
1285	default:
1286		return -EINVAL;
1287	}
1288}
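/* Illustrative attach from user space (standard iproute2 usage, not
 * driver-specific):
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * which reaches cpsw_xdp_prog_setup() via XDP_SETUP_PROG; detaching writes
 * NULL to priv->xdp_prog, so cpsw_run_xdp() becomes a pass-through.
 */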
1289
1290int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
1291		      struct page *page, int port)
1292{
1293	struct cpsw_common *cpsw = priv->cpsw;
1294	struct cpsw_meta_xdp *xmeta;
1295	struct cpdma_chan *txch;
1296	dma_addr_t dma;
1297	int ret;
1298
1299	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
1300	xmeta->ndev = priv->ndev;
1301	xmeta->ch = 0;
1302	txch = cpsw->txv[0].ch;
1303
1304	if (page) {
1305		dma = page_pool_get_dma_addr(page);
1306		dma += xdpf->headroom + sizeof(struct xdp_frame);
1307		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
1308					       dma, xdpf->len, port);
1309	} else {
1310		if (sizeof(*xmeta) > xdpf->headroom)
1311			return -EINVAL;
1312
1313		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
1314					xdpf->data, xdpf->len, port);
1315	}
1316
1317	if (ret)
1318		priv->ndev->stats.tx_dropped++;
1319
1320	return ret;
1321}
1322
1323int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
1324		 struct page *page, int port, int *len)
1325{
1326	struct cpsw_common *cpsw = priv->cpsw;
1327	struct net_device *ndev = priv->ndev;
1328	int ret = CPSW_XDP_CONSUMED;
1329	struct xdp_frame *xdpf;
1330	struct bpf_prog *prog;
1331	u32 act;
1332
1333	prog = READ_ONCE(priv->xdp_prog);
1334	if (!prog)
1335		return CPSW_XDP_PASS;
1336
1337	act = bpf_prog_run_xdp(prog, xdp);
1338	/* XDP prog might have changed packet data and boundaries */
1339	*len = xdp->data_end - xdp->data;
1340
1341	switch (act) {
1342	case XDP_PASS:
1343		ret = CPSW_XDP_PASS;
1344		goto out;
1345	case XDP_TX:
1346		xdpf = xdp_convert_buff_to_frame(xdp);
1347		if (unlikely(!xdpf))
1348			goto drop;
1349
1350		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
1351			xdp_return_frame_rx_napi(xdpf);
1352		break;
1353	case XDP_REDIRECT:
1354		if (xdp_do_redirect(ndev, xdp, prog))
1355			goto drop;
1356
1357		/*  Have to flush here, per packet, instead of doing it in bulk
1358		 *  at the end of the napi handler. The RX devices on this
1359		 *  particular hardware share a common queue, so the incoming
1360		 *  device might change per packet.
1361		 */
1362		xdp_do_flush();
1363		break;
1364	default:
1365		bpf_warn_invalid_xdp_action(ndev, prog, act);
1366		fallthrough;
1367	case XDP_ABORTED:
1368		trace_xdp_exception(ndev, prog, act);
1369		fallthrough;	/* handle aborts by dropping packet */
1370	case XDP_DROP:
1371		ndev->stats.rx_bytes += *len;
1372		ndev->stats.rx_packets++;
1373		goto drop;
1374	}
1375
1376	ndev->stats.rx_bytes += *len;
1377	ndev->stats.rx_packets++;
1378out:
1379	return ret;
1380drop:
1381	page_pool_recycle_direct(cpsw->page_pool[ch], page);
1382	return ret;
1383}
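/* Verdict handling above, summarized: XDP_PASS hands the buffer back for
 * normal skb processing, XDP_TX converts the buff to a frame and queues it
 * on TX channel 0, XDP_REDIRECT forwards and flushes immediately (the RX
 * queue is shared between ports, so bulking across packets is unsafe), and
 * XDP_ABORTED/XDP_DROP recycle the page straight back into the pool.
 */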
1384
1385static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
1386					  struct netlink_ext_ack *extack,
1387					  struct flow_cls_offload *cls,
1388					  u64 rate_pkt_ps)
1389{
1390	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1391	struct flow_dissector *dissector = rule->match.dissector;
1392	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
1393	struct flow_match_eth_addrs match;
1394	u32 port_id;
1395	int ret;
1396
1397	if (dissector->used_keys &
1398	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
1399	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1400	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
1401		NL_SET_ERR_MSG_MOD(extack,
1402				   "Unsupported keys used");
1403		return -EOPNOTSUPP;
1404	}
1405
1406	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1407		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
1408		return -EOPNOTSUPP;
1409	}
1410
1411	flow_rule_match_eth_addrs(rule, &match);
1412
1413	if (!is_zero_ether_addr(match.mask->src)) {
1414		NL_SET_ERR_MSG_MOD(extack,
1415				   "Matching on source MAC not supported");
1416		return -EOPNOTSUPP;
1417	}
1418
1419	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
1420
1421	if (is_broadcast_ether_addr(match.key->dst) &&
1422	    is_broadcast_ether_addr(match.mask->dst)) {
1423		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
1424		if (ret)
1425			return ret;
1426
1427		priv->ale_bc_ratelimit.cookie = cls->cookie;
1428		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
1429	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
1430		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
1431		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
1432		if (ret)
1433			return ret;
1434
1435		priv->ale_mc_ratelimit.cookie = cls->cookie;
1436		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
1437	} else {
1438		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
1439		return -EOPNOTSUPP;
1440	}
1441
1442	return 0;
1443}
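/* Illustrative tc usage for the matches supported above (broadcast or
 * multicast destination only, packet-per-second policing; exact syntax may
 * vary with the iproute2 version):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress flower skip_sw \
 *		dst_mac ff:ff:ff:ff:ff:ff \
 *		action police pkts_rate 1000 pkts_burst 1 conform-exceed drop/pipe
 *
 * Matching dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 selects the
 * multicast rate limiter instead.
 */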
1444
1445static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
1446					       const struct flow_action_entry *act,
1447					       struct netlink_ext_ack *extack)
1448{
1449	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1450		NL_SET_ERR_MSG_MOD(extack,
1451				   "Offload not supported when exceed action is not drop");
1452		return -EOPNOTSUPP;
1453	}
1454
1455	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1456	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1457		NL_SET_ERR_MSG_MOD(extack,
1458				   "Offload not supported when conform action is not pipe or ok");
1459		return -EOPNOTSUPP;
1460	}
1461
1462	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1463	    !flow_action_is_last_entry(action, act)) {
1464		NL_SET_ERR_MSG_MOD(extack,
1465				   "Offload not supported when conform action is ok, but action is not last");
1466		return -EOPNOTSUPP;
1467	}
1468
1469	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
1470	    act->police.avrate || act->police.overhead) {
1471		NL_SET_ERR_MSG_MOD(extack,
1472				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
1473		return -EOPNOTSUPP;
1474	}
1475
1476	return 0;
1477}
1478
1479static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
1480{
1481	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1482	struct netlink_ext_ack *extack = cls->common.extack;
1483	const struct flow_action_entry *act;
1484	int i, ret;
1485
1486	flow_action_for_each(i, act, &rule->action) {
1487		switch (act->id) {
1488		case FLOW_ACTION_POLICE:
1489			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
1490			if (ret)
1491				return ret;
1492
1493			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
1494							      act->police.rate_pkt_ps);
1495		default:
1496			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
1497			return -EOPNOTSUPP;
1498		}
1499	}
1500	return -EOPNOTSUPP;
1501}
1502
1503static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
1504{
1505	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
1506
1507	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
1508		priv->ale_bc_ratelimit.cookie = 0;
1509		priv->ale_bc_ratelimit.rate_packet_ps = 0;
1510		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
1511	}
1512
1513	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
1514		priv->ale_mc_ratelimit.cookie = 0;
1515		priv->ale_mc_ratelimit.rate_packet_ps = 0;
1516		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
1517	}
1518
1519	return 0;
1520}
1521
1522static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
1523{
1524	switch (cls_flower->command) {
1525	case FLOW_CLS_REPLACE:
1526		return cpsw_qos_configure_clsflower(priv, cls_flower);
1527	case FLOW_CLS_DESTROY:
1528		return cpsw_qos_delete_clsflower(priv, cls_flower);
1529	default:
1530		return -EOPNOTSUPP;
1531	}
1532}
1533
1534static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1535{
1536	struct cpsw_priv *priv = cb_priv;
1537	int ret;
1538
1539	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
1540		return -EOPNOTSUPP;
1541
1542	ret = pm_runtime_get_sync(priv->dev);
1543	if (ret < 0) {
1544		pm_runtime_put_noidle(priv->dev);
1545		return ret;
1546	}
1547
1548	switch (type) {
1549	case TC_SETUP_CLSFLOWER:
1550		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
1551		break;
1552	default:
1553		ret = -EOPNOTSUPP;
1554	}
1555
1556	pm_runtime_put(priv->dev);
1557	return ret;
1558}
1559
1560static LIST_HEAD(cpsw_qos_block_cb_list);
1561
1562static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
1563{
1564	struct cpsw_priv *priv = netdev_priv(ndev);
1565
1566	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
1567					  cpsw_qos_setup_tc_block_cb,
1568					  priv, priv, true);
1569}
1570
1571void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
1572{
1573	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
1574
1575	if (priv->ale_bc_ratelimit.cookie)
1576		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
1577					 priv->ale_bc_ratelimit.rate_packet_ps);
1578
1579	if (priv->ale_mc_ratelimit.cookie)
1580		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
1581					 priv->ale_mc_ratelimit.rate_packet_ps);
1582}