Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/* Copyright 2019 NXP */
   3
   4#include "enetc.h"
   5
   6#include <net/pkt_sched.h>
   7#include <linux/math64.h>
   8#include <linux/refcount.h>
   9#include <net/pkt_cls.h>
  10#include <net/tc_act/tc_gate.h>
  11
  12static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
  13{
  14	return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
  15}
  16
  17void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
  18{
  19	struct enetc_hw *hw = &priv->si->hw;
  20	u32 old_speed = priv->speed;
  21	u32 pspeed, tmp;
  22
  23	if (speed == old_speed)
  24		return;
  25
  26	switch (speed) {
  27	case SPEED_1000:
  28		pspeed = ENETC_PMR_PSPEED_1000M;
  29		break;
  30	case SPEED_2500:
  31		pspeed = ENETC_PMR_PSPEED_2500M;
  32		break;
  33	case SPEED_100:
  34		pspeed = ENETC_PMR_PSPEED_100M;
  35		break;
  36	case SPEED_10:
  37	default:
  38		pspeed = ENETC_PMR_PSPEED_10M;
  39	}
  40
  41	priv->speed = speed;
  42	tmp = enetc_port_rd(hw, ENETC_PMR);
  43	enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
  44}
  45
  46static int enetc_setup_taprio(struct enetc_ndev_priv *priv,
  47			      struct tc_taprio_qopt_offload *admin_conf)
  48{
 
  49	struct enetc_hw *hw = &priv->si->hw;
  50	struct enetc_cbd cbd = {.cmd = 0};
  51	struct tgs_gcl_conf *gcl_config;
  52	struct tgs_gcl_data *gcl_data;
  53	dma_addr_t dma;
  54	struct gce *gce;
  55	u16 data_size;
  56	u16 gcl_len;
  57	void *tmp;
  58	u32 tge;
  59	int err;
  60	int i;
  61
  62	/* TSD and Qbv are mutually exclusive in hardware */
  63	for (i = 0; i < priv->num_tx_rings; i++)
  64		if (priv->tx_ring[i]->tsd_enable)
  65			return -EBUSY;
  66
  67	if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
  68		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
  69
  70	if (admin_conf->cycle_time > U32_MAX ||
  71	    admin_conf->cycle_time_extension > U32_MAX)
  72		return -EINVAL;
  73
  74	/* Configure the (administrative) gate control list using the
  75	 * control BD descriptor.
  76	 */
  77	gcl_config = &cbd.gcl_conf;
  78	gcl_len = admin_conf->num_entries;
  79
  80	data_size = struct_size(gcl_data, entry, gcl_len);
  81	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
  82				       &dma, (void *)&gcl_data);
  83	if (!tmp)
  84		return -ENOMEM;
  85
  86	gce = (struct gce *)(gcl_data + 1);
  87
  88	/* Set all gates open as default */
  89	gcl_config->atc = 0xff;
  90	gcl_config->acl_len = cpu_to_le16(gcl_len);
  91
  92	gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
  93	gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
  94	gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
  95	gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
  96
  97	for (i = 0; i < gcl_len; i++) {
  98		struct tc_taprio_sched_entry *temp_entry;
  99		struct gce *temp_gce = gce + i;
 100
 101		temp_entry = &admin_conf->entries[i];
 102
 103		temp_gce->gate = (u8)temp_entry->gate_mask;
 104		temp_gce->period = cpu_to_le32(temp_entry->interval);
 105	}
 106
 107	cbd.status_flags = 0;
 108
 109	cbd.cls = BDCR_CMD_PORT_GCL;
 110	cbd.status_flags = 0;
 111
 112	tge = enetc_rd(hw, ENETC_PTGCR);
 113	enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
 114
 115	err = enetc_send_cmd(priv->si, &cbd);
 116	if (err)
 117		enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
 118
 119	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
 120
 121	if (err)
 122		return err;
 123
 124	enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
 125	priv->active_offloads |= ENETC_F_QBV;
 126
 127	return 0;
 128}
 129
 130static void enetc_reset_taprio_stats(struct enetc_ndev_priv *priv)
 131{
 132	int i;
 133
 134	for (i = 0; i < priv->num_tx_rings; i++)
 135		priv->tx_ring[i]->stats.win_drop = 0;
 136}
 137
 138static void enetc_reset_taprio(struct enetc_ndev_priv *priv)
 139{
 140	struct enetc_hw *hw = &priv->si->hw;
 141	u32 val;
 142
 143	val = enetc_rd(hw, ENETC_PTGCR);
 144	enetc_wr(hw, ENETC_PTGCR, val & ~ENETC_PTGCR_TGE);
 145	enetc_reset_ptcmsdur(hw);
 146
 147	priv->active_offloads &= ~ENETC_F_QBV;
 148}
 149
 150static void enetc_taprio_destroy(struct net_device *ndev)
 151{
 152	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 153
 154	enetc_reset_taprio(priv);
 155	enetc_reset_tc_mqprio(ndev);
 156	enetc_reset_taprio_stats(priv);
 157}
 158
 159static void enetc_taprio_stats(struct net_device *ndev,
 160			       struct tc_taprio_qopt_stats *stats)
 161{
 
 162	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 163	u64 window_drops = 0;
 
 
 164	int i;
 165
 
 166	for (i = 0; i < priv->num_tx_rings; i++)
 167		window_drops += priv->tx_ring[i]->stats.win_drop;
 168
 169	stats->window_drops = window_drops;
 170}
 171
 172static void enetc_taprio_queue_stats(struct net_device *ndev,
 173				     struct tc_taprio_qopt_queue_stats *queue_stats)
 174{
 175	struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
 176	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 177	int queue = queue_stats->queue;
 178
 179	stats->window_drops = priv->tx_ring[queue]->stats.win_drop;
 180}
 181
 182static int enetc_taprio_replace(struct net_device *ndev,
 183				struct tc_taprio_qopt_offload *offload)
 184{
 185	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 186	int err;
 187
 188	err = enetc_setup_tc_mqprio(ndev, &offload->mqprio);
 189	if (err)
 190		return err;
 191
 192	err = enetc_setup_taprio(priv, offload);
 193	if (err)
 194		enetc_reset_tc_mqprio(ndev);
 195
 196	return err;
 197}
 198
 199int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
 200{
 201	struct tc_taprio_qopt_offload *offload = type_data;
 202	int err = 0;
 
 203
 204	switch (offload->cmd) {
 205	case TAPRIO_CMD_REPLACE:
 206		err = enetc_taprio_replace(ndev, offload);
 207		break;
 208	case TAPRIO_CMD_DESTROY:
 209		enetc_taprio_destroy(ndev);
 210		break;
 211	case TAPRIO_CMD_STATS:
 212		enetc_taprio_stats(ndev, &offload->stats);
 213		break;
 214	case TAPRIO_CMD_QUEUE_STATS:
 215		enetc_taprio_queue_stats(ndev, &offload->queue_stats);
 216		break;
 217	default:
 218		err = -EOPNOTSUPP;
 219	}
 220
 221	return err;
 222}
 223
 224static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
 225{
 226	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
 227}
 228
 229static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
 230{
 231	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
 232}
 233
/* Offload a Credit Based Shaper (IEEE 802.1Qav / tc cbs) configuration
 * for one traffic class. Only the two highest-priority TCs support CBS;
 * they must be enabled top-down and disabled bottom-up.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported TC/slope settings,
 * -EINVAL for ordering or bandwidth-budget violations.
 */
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	/* priv->speed holds SPEED_* values, i.e. the link rate in Mbps */
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = tc_nums - 1;
	prio_next = tc_nums - 2;

	/* Support highest prio and second prio tc in cbs mode */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the other TC that are numerically
		 * lower than this TC have been disabled.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disable TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		/* Clear the credit and the bw/enable registers */
		enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	/* idleslope/sendslope are in kbps; their difference must equal
	 * the full port rate, with idleslope >= 0 and sendslope <= 0
	 */
	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* bw = idleslope / (rate * 1000) * 100, i.e. percent of link rate */
	bw = cbs->idleslope / (port_transmit_rate * 10UL);

	/* Make sure the other TC that are numerically
	 * higher than this TC have been enabled.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d first before enable TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS Bandwidth can't exceed 100\n");
		return -EINVAL;
	}

	/* NOTE(review): return value deliberately discarded — presumably a
	 * dummy read of the max SDU register required before the credit
	 * computation below; confirm against the ENETC reference manual.
	 */
	enetc_port_rd(hw, ENETC_PTCMSDUR(tc));

	/* For top prio TC, the max_interfrence_size is maxSizedFrame.
	 *
	 * For next prio TC, the max_interfrence_size is calculated as below:
	 *
	 *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- RA: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- MA: maximum sized frame for AVB Class A
	 */

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		/* All quantities below are in bits / bits-per-second */
		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit bits calculate by:
	 *
	 * maxSizedFrame * (idleSlope/portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

	/* hiCredit bits to hiCredit register need to calculated as:
	 *
	 * (enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);

	enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set bw register and enable this traffic class */
	enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}
 349
 350int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
 351{
 352	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 353	struct tc_etf_qopt_offload *qopt = type_data;
 354	u8 tc_nums = netdev_get_num_tc(ndev);
 355	struct enetc_hw *hw = &priv->si->hw;
 356	int tc;
 357
 358	if (!tc_nums)
 359		return -EOPNOTSUPP;
 360
 361	tc = qopt->queue;
 362
 363	if (tc < 0 || tc >= priv->num_tx_rings)
 364		return -EINVAL;
 365
 366	/* TSD and Qbv are mutually exclusive in hardware */
 367	if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
 368		return -EBUSY;
 369
 370	priv->tx_ring[tc]->tsd_enable = qopt->enable;
 371	enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
 372
 373	return 0;
 374}
 375
/* Stream identification method (802.1CB-style): match on destination
 * MAC (NULL) or source MAC (SMAC).
 */
enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,
	STREAMID_TYPE_SMAC,
};

/* VLAN tagging requirement encoded in the stream identity entry */
enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,
	STREAMID_VLAN_UNTAGGED,
	STREAMID_VLAN_ALL,
};

/* Sentinel meaning "no value / match any" for prio, handle, meter id */
#define ENETC_PSFP_WILDCARD -1
/* Offset added when synthesizing stream handles from filter cookies */
#define HANDLE_OFFSET 100

/* Which offload path(s) a recognized action combination may take */
enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};
 397
/* This is for limit output type for input actions */
struct actions_fwd {
	u64 actions;	/* exact set of flow actions this entry accepts */
	u64 keys;	/* include the must needed keys */
	enum forward_type output;	/* offload path for this combination */
};

/* Counters read back from a stream filter instance; derived fields are
 * computed in enetc_streamcounter_hw_get().
 */
struct psfp_streamfilter_counters {
	u64 matching_frames_count;
	u64 passing_frames_count;
	u64 not_passing_frames_count;
	u64 passing_sdu_count;
	u64 not_passing_sdu_count;
	u64 red_frames_count;
};
 413
/* Stream identity entry: how a stream is recognized on ingress */
struct enetc_streamid {
	u32 index;	/* entry index in the HW stream identify table */
	union {
		u8 src_mac[6];	/* used when filtertype == STREAMID_TYPE_SMAC */
		u8 dst_mac[6];	/* used when filtertype == STREAMID_TYPE_NULL */
	};
	u8 filtertype;	/* enum streamid_type */
	u16 vid;	/* VLAN id to match */
	u8 tagged;	/* enum streamid_vlan_tagged */
	s32 handle;	/* stream handle assigned to matching frames */
};

/* Stream filter instance; shared via refcount between identical filters */
struct enetc_psfp_filter {
	u32 index;	/* HW stream filter table index */
	s32 handle;	/* stream handle to match, or wildcard (<0) */
	s8 prio;	/* frame priority to match, or wildcard (<0) */
	u32 maxsdu;	/* max SDU size; 0 = unchecked */
	u32 gate_id;	/* index of the associated stream gate */
	s32 meter_id;	/* index of the flow meter, or wildcard (<0) */
	refcount_t refcount;
	struct hlist_node node;
};

/* Stream gate instance with its gate control list entries */
struct enetc_psfp_gate {
	u32 index;	/* HW stream gate table index */
	s8 init_ipv;	/* initial internal priority, or <0 for none */
	u64 basetime;	/* schedule base time, ns */
	u64 cycletime;	/* schedule cycle length, ns */
	u64 cycletimext;	/* cycle time extension, ns */
	u32 num_entries;
	refcount_t refcount;
	struct hlist_node node;
	struct action_gate_entry entries[] __counted_by(num_entries);
};

/* Only enable the green color frame now
 * Will add eir and ebs color blind, couple flag etc when
 * policing action add more offloading parameters
 */
struct enetc_psfp_meter {
	u32 index;	/* HW flow meter table index */
	u32 cir;	/* committed information rate */
	u32 cbs;	/* committed burst size */
	refcount_t refcount;
	struct hlist_node node;
};
 460
/* Flag: this stream has a flow meter (FMI) attached */
#define ENETC_PSFP_FLAGS_FMI BIT(0)

/* One offloaded tc flower rule: stream identity plus the indices of the
 * shared filter/gate/meter instances it references.
 */
struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;	/* stream filter instance index */
	u32 sgi_index;	/* stream gate instance index */
	u32 flags;	/* ENETC_PSFP_FLAGS_* */
	u32 fmi_index;	/* flow meter index, valid if FLAGS_FMI set */
	struct flow_stats stats;	/* last reported counters */
	struct hlist_node node;
};

/* Driver-global PSFP state shared across ports */
struct enetc_psfp {
	unsigned long dev_bitmap;	/* ports with PSFP enabled */
	unsigned long *psfp_sfi_bitmap;	/* allocated filter indices */
	struct hlist_head stream_list;
	struct hlist_head psfp_filter_list;
	struct hlist_head psfp_gate_list;
	struct hlist_head psfp_meter_list;
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};
 482
 483static struct actions_fwd enetc_act_fwd[] = {
 484	{
 485		BIT(FLOW_ACTION_GATE),
 486		BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
 487		FILTER_ACTION_TYPE_PSFP
 488	},
 489	{
 490		BIT(FLOW_ACTION_POLICE) |
 491		BIT(FLOW_ACTION_GATE),
 492		BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
 493		FILTER_ACTION_TYPE_PSFP
 494	},
 495	/* example for ACL actions */
 496	{
 497		BIT(FLOW_ACTION_DROP),
 498		0,
 499		FILTER_ACTION_TYPE_ACL
 500	}
 501};
 502
/* Global PSFP state; psfp_sfi_bitmap is allocated lazily on first use */
static struct enetc_psfp epsfp = {
	.dev_bitmap = 0,
	.psfp_sfi_bitmap = NULL,
};

/* Flow block callback bookkeeping for flow_block_cb_setup_simple() */
static LIST_HEAD(enetc_block_cb_list);
 509
/* Stream Identity Entry Set Descriptor
 *
 * Program (or disable) a stream identity table entry via a control BD.
 * The entry is first written in a disabled state with broadcast DMAC
 * and wildcard VID, then rewritten enabled with the real match fields.
 *
 * Returns 0 on success, -EINVAL for bad port/index, -EOPNOTSUPP for an
 * unsupported filter type, -ENOMEM on CBD data allocation failure, or
 * the command error.
 */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int port;
	int err;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&si_data);
	if (!tmp)
		return -ENOMEM;

	/* Placeholder match data for the disable pass: broadcast DMAC,
	 * VID mask with TG=b11 and VIDM set
	 */
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
			       + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Only one port supported for one entry, set itself */
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto out;

	if (!enable)
		goto out;

	/* Enable the entry: overwrite again in case space was flushed by
	 * hardware
	 */
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	/* VIDM default to be 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}
 601
/* Stream Filter Instance Set Descriptor
 *
 * Program (or disable) one stream filter instance via a control BD.
 * Negative handle/prio/meter_id values mean "wildcard / not used" and
 * leave the corresponding HW match bits clear.
 */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;
	int port;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	/* For disable, send the descriptor with the enable bit clear */
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	/* Match on stream handle only when one was assigned */
	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = cpu_to_le32(1 << port);

	/* The priority value which may be matched against the
	 * frame's priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
		cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}
 659
 660static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
 661				      u32 index,
 662				      struct psfp_streamfilter_counters *cnt)
 663{
 664	struct enetc_cbd cbd = { .cmd = 2 };
 665	struct sfi_counter_data *data_buf;
 666	dma_addr_t dma;
 667	u16 data_size;
 668	void *tmp;
 669	int err;
 670
 671	cbd.index = cpu_to_le16((u16)index);
 672	cbd.cmd = 2;
 673	cbd.cls = BDCR_CMD_STREAM_FILTER;
 674	cbd.status_flags = 0;
 675
 676	data_size = sizeof(struct sfi_counter_data);
 677
 678	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
 679				       &dma, (void *)&data_buf);
 680	if (!tmp)
 681		return -ENOMEM;
 682
 683	err = enetc_send_cmd(priv->si, &cbd);
 684	if (err)
 685		goto exit;
 686
 687	cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
 688				     data_buf->matchl;
 689
 690	cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
 691				     data_buf->msdu_dropl;
 692
 693	cnt->passing_sdu_count = cnt->matching_frames_count
 694				- cnt->not_passing_sdu_count;
 695
 696	cnt->not_passing_frames_count =
 697				((u64)data_buf->stream_gate_droph << 32) +
 698				data_buf->stream_gate_dropl;
 699
 700	cnt->passing_frames_count = cnt->matching_frames_count -
 701				    cnt->not_passing_sdu_count -
 702				    cnt->not_passing_frames_count;
 703
 704	cnt->red_frames_count =	((u64)data_buf->flow_meter_droph << 32)	+
 705				data_buf->flow_meter_dropl;
 706
 707exit:
 708	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
 709
 710	return err;
 711}
 712
 713static u64 get_ptp_now(struct enetc_hw *hw)
 714{
 715	u64 now_lo, now_hi, now;
 716
 717	now_lo = enetc_rd(hw, ENETC_SICTR0);
 718	now_hi = enetc_rd(hw, ENETC_SICTR1);
 719	now = now_lo | now_hi << 32;
 720
 721	return now;
 722}
 723
 724static int get_start_ns(u64 now, u64 cycle, u64 *start)
 725{
 726	u64 n;
 727
 728	if (!cycle)
 729		return -EFAULT;
 730
 731	n = div64_u64(now, cycle);
 732
 733	*start = (n + 1) * cycle;
 734
 735	return 0;
 736}
 737
/* Stream Gate Instance Set Descriptor
 *
 * Program (or disable) one stream gate instance. Programming is a
 * two-step CBD sequence: cmd 0 writes the basic gate config, cmd 1
 * transfers the administrative gate control list. If the requested
 * base time is already in the past, the start is pushed to the next
 * cycle boundary after "now".
 */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	void *tmp;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep open before gate list start */
	sgi_config->ocgtst = 0x80;

	/* Initial internal priority, if one was requested (init_ipv >= 0) */
	sgi_config->oipv = (sgi->init_ipv < 0) ?
				0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	/* Second pass (cmd 1): transfer the gate control list */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	/* HW encodes the list length as (entries - 1) in 2 bits */
	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&sgcl_data);
	if (!tmp)
		return -ENOMEM;

	sgce = &sgcl_data->sgcl[0];

	sgcl_config->agtst = 0x80;

	sgcl_data->ct = sgi->cycletime;
	sgcl_data->cte = sgi->cycletimext;

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	/* Translate each software gate entry into the HW entry format:
	 * gate state, optional IPV, optional max-octets budget, interval
	 */
	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = from->interval;
	}

	/* If basetime is less than now, calculate start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = lower_32_bits(start);
		sgcl_data->bth = upper_32_bits(start);
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = hi;
		sgcl_data->btl = lo;
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
	return err;
}
 859
 860static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
 861				  struct enetc_psfp_meter *fmi,
 862				  u8 enable)
 863{
 864	struct enetc_cbd cbd = { .cmd = 0 };
 865	struct fmi_conf *fmi_config;
 866	u64 temp = 0;
 867
 868	cbd.index = cpu_to_le16((u16)fmi->index);
 869	cbd.cls = BDCR_CMD_FLOW_METER;
 870	cbd.status_flags = 0x80;
 871
 872	if (!enable)
 873		return enetc_send_cmd(priv->si, &cbd);
 874
 875	fmi_config = &cbd.fmi_conf;
 876	fmi_config->en = 0x80;
 877
 878	if (fmi->cir) {
 879		temp = (u64)8000 * fmi->cir;
 880		temp = div_u64(temp, 3725);
 881	}
 882
 883	fmi_config->cir = cpu_to_le32((u32)temp);
 884	fmi_config->cbs = cpu_to_le32(fmi->cbs);
 885
 886	/* Default for eir ebs disable */
 887	fmi_config->eir = 0;
 888	fmi_config->ebs = 0;
 889
 890	/* Default:
 891	 * mark red disable
 892	 * drop on yellow disable
 893	 * color mode disable
 894	 * couple flag disable
 895	 */
 896	fmi_config->conf = 0;
 897
 898	return enetc_send_cmd(priv->si, &cbd);
 899}
 900
 901static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
 902{
 903	struct enetc_stream_filter *f;
 904
 905	hlist_for_each_entry(f, &epsfp.stream_list, node)
 906		if (f->sid.index == index)
 907			return f;
 908
 909	return NULL;
 910}
 911
 912static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
 913{
 914	struct enetc_psfp_gate *g;
 915
 916	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
 917		if (g->index == index)
 918			return g;
 919
 920	return NULL;
 921}
 922
 923static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
 924{
 925	struct enetc_psfp_filter *s;
 926
 927	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
 928		if (s->index == index)
 929			return s;
 930
 931	return NULL;
 932}
 933
 934static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
 935{
 936	struct enetc_psfp_meter *m;
 937
 938	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
 939		if (m->index == index)
 940			return m;
 941
 942	return NULL;
 943}
 944
 945static struct enetc_psfp_filter
 946	*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
 947{
 948	struct enetc_psfp_filter *s;
 949
 950	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
 951		if (s->gate_id == sfi->gate_id &&
 952		    s->prio == sfi->prio &&
 953		    s->maxsdu == sfi->maxsdu &&
 954		    s->meter_id == sfi->meter_id)
 955			return s;
 956
 957	return NULL;
 958}
 959
 960static int enetc_get_free_index(struct enetc_ndev_priv *priv)
 961{
 962	u32 max_size = priv->psfp_cap.max_psfp_filter;
 963	unsigned long index;
 964
 965	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
 966	if (index == max_size)
 967		return -1;
 968
 969	return index;
 970}
 971
 972static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
 973{
 974	struct enetc_psfp_filter *sfi;
 975	u8 z;
 976
 977	sfi = enetc_get_filter_by_index(index);
 978	WARN_ON(!sfi);
 979	z = refcount_dec_and_test(&sfi->refcount);
 980
 981	if (z) {
 982		enetc_streamfilter_hw_set(priv, sfi, false);
 983		hlist_del(&sfi->node);
 984		kfree(sfi);
 985		clear_bit(index, epsfp.psfp_sfi_bitmap);
 986	}
 987}
 988
 989static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
 990{
 991	struct enetc_psfp_gate *sgi;
 992	u8 z;
 993
 994	sgi = enetc_get_gate_by_index(index);
 995	WARN_ON(!sgi);
 996	z = refcount_dec_and_test(&sgi->refcount);
 997	if (z) {
 998		enetc_streamgate_hw_set(priv, sgi, false);
 999		hlist_del(&sgi->node);
1000		kfree(sgi);
1001	}
1002}
1003
1004static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
1005{
1006	struct enetc_psfp_meter *fmi;
1007	u8 z;
1008
1009	fmi = enetc_get_meter_by_index(index);
1010	WARN_ON(!fmi);
1011	z = refcount_dec_and_test(&fmi->refcount);
1012	if (z) {
1013		enetc_flowmeter_hw_set(priv, fmi, false);
1014		hlist_del(&fmi->node);
1015		kfree(fmi);
1016	}
1017}
1018
/* Release all resources referenced by one stream filter chain (meter if
 * attached, then gate, then filter) and free the chain itself.
 */
static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}
1031
/* Program a full PSFP chain into hardware in dependency order:
 * stream identity -> filter (optional) -> gate -> meter (optional).
 * On any failure, roll back everything programmed so far.
 *
 * Returns 0 on success, otherwise the first error encountered.
 */
static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	/* sfi may be NULL when the filter instance is shared/preexisting */
	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

	/* Rollback ladder: undo in reverse order of programming */
revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}
1071
1072static struct actions_fwd *
1073enetc_check_flow_actions(u64 acts, unsigned long long inputkeys)
1074{
1075	int i;
1076
1077	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
1078		if (acts == enetc_act_fwd[i].actions &&
1079		    inputkeys & enetc_act_fwd[i].keys)
1080			return &enetc_act_fwd[i];
1081
1082	return NULL;
1083}
1084
/* Validate a police action against what the flow meter hardware can
 * offload: exceed must drop, conform must be pipe/ok (and ok must be
 * the last action), and packet-rate/peak-rate/avrate/overhead are not
 * supported. Sets an extack message and returns -EOPNOTSUPP otherwise.
 */
static int enetc_psfp_policer_validate(const struct flow_action *action,
				       const struct flow_action_entry *act,
				       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}
1124
1125static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
1126				      struct flow_cls_offload *f)
1127{
1128	struct flow_action_entry *entryg = NULL, *entryp = NULL;
1129	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1130	struct netlink_ext_ack *extack = f->common.extack;
1131	struct enetc_stream_filter *filter, *old_filter;
1132	struct enetc_psfp_meter *fmi = NULL, *old_fmi;
1133	struct enetc_psfp_filter *sfi, *old_sfi;
1134	struct enetc_psfp_gate *sgi, *old_sgi;
1135	struct flow_action_entry *entry;
1136	struct action_gate_entry *e;
1137	u8 sfi_overwrite = 0;
1138	int entries_size;
1139	int i, err;
1140
1141	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1142		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1143		return -ENOSPC;
1144	}
1145
1146	flow_action_for_each(i, entry, &rule->action)
1147		if (entry->id == FLOW_ACTION_GATE)
1148			entryg = entry;
1149		else if (entry->id == FLOW_ACTION_POLICE)
1150			entryp = entry;
1151
1152	/* Not support without gate action */
1153	if (!entryg)
1154		return -EINVAL;
1155
1156	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1157	if (!filter)
1158		return -ENOMEM;
1159
1160	filter->sid.index = f->common.chain_index;
1161
1162	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1163		struct flow_match_eth_addrs match;
1164
1165		flow_rule_match_eth_addrs(rule, &match);
1166
1167		if (!is_zero_ether_addr(match.mask->dst) &&
1168		    !is_zero_ether_addr(match.mask->src)) {
1169			NL_SET_ERR_MSG_MOD(extack,
1170					   "Cannot match on both source and destination MAC");
1171			err = -EINVAL;
1172			goto free_filter;
1173		}
1174
1175		if (!is_zero_ether_addr(match.mask->dst)) {
1176			if (!is_broadcast_ether_addr(match.mask->dst)) {
1177				NL_SET_ERR_MSG_MOD(extack,
1178						   "Masked matching on destination MAC not supported");
1179				err = -EINVAL;
1180				goto free_filter;
1181			}
1182			ether_addr_copy(filter->sid.dst_mac, match.key->dst);
1183			filter->sid.filtertype = STREAMID_TYPE_NULL;
1184		}
1185
1186		if (!is_zero_ether_addr(match.mask->src)) {
1187			if (!is_broadcast_ether_addr(match.mask->src)) {
1188				NL_SET_ERR_MSG_MOD(extack,
1189						   "Masked matching on source MAC not supported");
1190				err = -EINVAL;
1191				goto free_filter;
1192			}
1193			ether_addr_copy(filter->sid.src_mac, match.key->src);
1194			filter->sid.filtertype = STREAMID_TYPE_SMAC;
1195		}
1196	} else {
1197		NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
1198		err = -EINVAL;
1199		goto free_filter;
1200	}
1201
1202	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1203		struct flow_match_vlan match;
1204
1205		flow_rule_match_vlan(rule, &match);
1206		if (match.mask->vlan_priority) {
1207			if (match.mask->vlan_priority !=
1208			    (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
1209				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
1210				err = -EINVAL;
1211				goto free_filter;
1212			}
1213		}
1214
1215		if (match.mask->vlan_id) {
1216			if (match.mask->vlan_id != VLAN_VID_MASK) {
1217				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
1218				err = -EINVAL;
1219				goto free_filter;
1220			}
1221
1222			filter->sid.vid = match.key->vlan_id;
1223			if (!filter->sid.vid)
1224				filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
1225			else
1226				filter->sid.tagged = STREAMID_VLAN_TAGGED;
1227		}
1228	} else {
1229		filter->sid.tagged = STREAMID_VLAN_ALL;
1230	}
1231
1232	/* parsing gate action */
1233	if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
1234		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1235		err = -ENOSPC;
1236		goto free_filter;
1237	}
1238
1239	if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
1240		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1241		err = -ENOSPC;
1242		goto free_filter;
1243	}
1244
1245	entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
1246	sgi = kzalloc(entries_size, GFP_KERNEL);
1247	if (!sgi) {
1248		err = -ENOMEM;
1249		goto free_filter;
1250	}
1251
1252	refcount_set(&sgi->refcount, 1);
1253	sgi->index = entryg->hw_index;
1254	sgi->init_ipv = entryg->gate.prio;
1255	sgi->basetime = entryg->gate.basetime;
1256	sgi->cycletime = entryg->gate.cycletime;
1257	sgi->num_entries = entryg->gate.num_entries;
1258
1259	e = sgi->entries;
1260	for (i = 0; i < entryg->gate.num_entries; i++) {
1261		e[i].gate_state = entryg->gate.entries[i].gate_state;
1262		e[i].interval = entryg->gate.entries[i].interval;
1263		e[i].ipv = entryg->gate.entries[i].ipv;
1264		e[i].maxoctets = entryg->gate.entries[i].maxoctets;
1265	}
1266
1267	filter->sgi_index = sgi->index;
1268
1269	sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
1270	if (!sfi) {
1271		err = -ENOMEM;
1272		goto free_gate;
1273	}
1274
1275	refcount_set(&sfi->refcount, 1);
1276	sfi->gate_id = sgi->index;
1277	sfi->meter_id = ENETC_PSFP_WILDCARD;
1278
1279	/* Flow meter and max frame size */
1280	if (entryp) {
1281		err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
1282		if (err)
1283			goto free_sfi;
1284
1285		if (entryp->police.burst) {
1286			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
1287			if (!fmi) {
1288				err = -ENOMEM;
1289				goto free_sfi;
1290			}
1291			refcount_set(&fmi->refcount, 1);
1292			fmi->cir = entryp->police.rate_bytes_ps;
1293			fmi->cbs = entryp->police.burst;
1294			fmi->index = entryp->hw_index;
1295			filter->flags |= ENETC_PSFP_FLAGS_FMI;
1296			filter->fmi_index = fmi->index;
1297			sfi->meter_id = fmi->index;
1298		}
1299
1300		if (entryp->police.mtu)
1301			sfi->maxsdu = entryp->police.mtu;
1302	}
1303
1304	/* prio ref the filter prio */
1305	if (f->common.prio && f->common.prio <= BIT(3))
1306		sfi->prio = f->common.prio - 1;
1307	else
1308		sfi->prio = ENETC_PSFP_WILDCARD;
1309
1310	old_sfi = enetc_psfp_check_sfi(sfi);
1311	if (!old_sfi) {
1312		int index;
1313
1314		index = enetc_get_free_index(priv);
1315		if (index < 0) {
1316			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
1317			err = -ENOSPC;
1318			goto free_fmi;
1319		}
1320
1321		sfi->index = index;
1322		sfi->handle = index + HANDLE_OFFSET;
1323		/* Update the stream filter handle also */
1324		filter->sid.handle = sfi->handle;
1325		filter->sfi_index = sfi->index;
1326		sfi_overwrite = 0;
1327	} else {
1328		filter->sfi_index = old_sfi->index;
1329		filter->sid.handle = old_sfi->handle;
1330		sfi_overwrite = 1;
1331	}
1332
1333	err = enetc_psfp_hw_set(priv, &filter->sid,
1334				sfi_overwrite ? NULL : sfi, sgi, fmi);
1335	if (err)
1336		goto free_fmi;
1337
1338	spin_lock(&epsfp.psfp_lock);
1339	if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
1340		old_fmi = enetc_get_meter_by_index(filter->fmi_index);
1341		if (old_fmi) {
1342			fmi->refcount = old_fmi->refcount;
1343			refcount_set(&fmi->refcount,
1344				     refcount_read(&old_fmi->refcount) + 1);
1345			hlist_del(&old_fmi->node);
1346			kfree(old_fmi);
1347		}
1348		hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
1349	}
1350
1351	/* Remove the old node if exist and update with a new node */
1352	old_sgi = enetc_get_gate_by_index(filter->sgi_index);
1353	if (old_sgi) {
1354		refcount_set(&sgi->refcount,
1355			     refcount_read(&old_sgi->refcount) + 1);
1356		hlist_del(&old_sgi->node);
1357		kfree(old_sgi);
1358	}
1359
1360	hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
1361
1362	if (!old_sfi) {
1363		hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
1364		set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
1365	} else {
1366		kfree(sfi);
1367		refcount_inc(&old_sfi->refcount);
1368	}
1369
1370	old_filter = enetc_get_stream_by_index(filter->sid.index);
1371	if (old_filter)
1372		remove_one_chain(priv, old_filter);
1373
1374	filter->stats.lastused = jiffies;
1375	hlist_add_head(&filter->node, &epsfp.stream_list);
1376
1377	spin_unlock(&epsfp.psfp_lock);
1378
1379	return 0;
1380
1381free_fmi:
1382	kfree(fmi);
1383free_sfi:
1384	kfree(sfi);
1385free_gate:
1386	kfree(sgi);
1387free_filter:
1388	kfree(filter);
1389
1390	return err;
1391}
1392
1393static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
1394				  struct flow_cls_offload *cls_flower)
1395{
1396	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1397	struct netlink_ext_ack *extack = cls_flower->common.extack;
1398	struct flow_dissector *dissector = rule->match.dissector;
1399	struct flow_action *action = &rule->action;
1400	struct flow_action_entry *entry;
1401	struct actions_fwd *fwd;
1402	u64 actions = 0;
1403	int i, err;
1404
1405	if (!flow_action_has_entries(action)) {
1406		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
1407		return -EINVAL;
1408	}
1409
1410	flow_action_for_each(i, entry, action)
1411		actions |= BIT(entry->id);
1412
1413	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
1414	if (!fwd) {
1415		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
1416		return -EOPNOTSUPP;
1417	}
1418
1419	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
1420		err = enetc_psfp_parse_clsflower(priv, cls_flower);
1421		if (err) {
1422			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
1423			return err;
1424		}
1425	} else {
1426		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
1427		return -EOPNOTSUPP;
1428	}
1429
1430	return 0;
1431}
1432
1433static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
1434					struct flow_cls_offload *f)
1435{
1436	struct enetc_stream_filter *filter;
1437	struct netlink_ext_ack *extack = f->common.extack;
1438	int err;
1439
1440	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1441		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1442		return -ENOSPC;
1443	}
1444
1445	filter = enetc_get_stream_by_index(f->common.chain_index);
1446	if (!filter)
1447		return -EINVAL;
1448
1449	err = enetc_streamid_hw_set(priv, &filter->sid, false);
1450	if (err)
1451		return err;
1452
1453	remove_one_chain(priv, filter);
1454
1455	return 0;
1456}
1457
/* FLOW_CLS_DESTROY entry point; PSFP is the only backend implemented. */
static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}
1463
/* FLOW_CLS_STATS: read the hardware stream filter counters and report
 * only the delta since the previous query to the flow core.
 */
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	spin_lock(&epsfp.psfp_lock);
	/* Hardware counters are cumulative; subtract the totals reported
	 * so far (filter->stats) to obtain this interval's delta, then
	 * fold the delta back into the running totals.
	 */
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}
1498
1499static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
1500				     struct flow_cls_offload *cls_flower)
1501{
1502	switch (cls_flower->command) {
1503	case FLOW_CLS_REPLACE:
1504		return enetc_config_clsflower(priv, cls_flower);
1505	case FLOW_CLS_DESTROY:
1506		return enetc_destroy_clsflower(priv, cls_flower);
1507	case FLOW_CLS_STATS:
1508		return enetc_psfp_get_stats(priv, cls_flower);
1509	default:
1510		return -EOPNOTSUPP;
1511	}
1512}
1513
1514static inline void clean_psfp_sfi_bitmap(void)
1515{
1516	bitmap_free(epsfp.psfp_sfi_bitmap);
1517	epsfp.psfp_sfi_bitmap = NULL;
1518}
1519
1520static void clean_stream_list(void)
1521{
1522	struct enetc_stream_filter *s;
1523	struct hlist_node *tmp;
1524
1525	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
1526		hlist_del(&s->node);
1527		kfree(s);
1528	}
1529}
1530
1531static void clean_sfi_list(void)
1532{
1533	struct enetc_psfp_filter *sfi;
1534	struct hlist_node *tmp;
1535
1536	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
1537		hlist_del(&sfi->node);
1538		kfree(sfi);
1539	}
1540}
1541
1542static void clean_sgi_list(void)
1543{
1544	struct enetc_psfp_gate *sgi;
1545	struct hlist_node *tmp;
1546
1547	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
1548		hlist_del(&sgi->node);
1549		kfree(sgi);
1550	}
1551}
1552
1553static void clean_psfp_all(void)
1554{
1555	/* Disable all list nodes and free all memory */
1556	clean_sfi_list();
1557	clean_sgi_list();
1558	clean_stream_list();
1559	epsfp.dev_bitmap = 0;
1560	clean_psfp_sfi_bitmap();
1561}
1562
1563int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1564			    void *cb_priv)
1565{
1566	struct net_device *ndev = cb_priv;
1567
1568	if (!tc_can_offload(ndev))
1569		return -EOPNOTSUPP;
1570
1571	switch (type) {
1572	case TC_SETUP_CLSFLOWER:
1573		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
1574	default:
1575		return -EOPNOTSUPP;
1576	}
1577}
1578
1579int enetc_set_psfp(struct net_device *ndev, bool en)
1580{
1581	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1582	int err;
1583
1584	if (en) {
1585		err = enetc_psfp_enable(priv);
1586		if (err)
1587			return err;
1588
1589		priv->active_offloads |= ENETC_F_QCI;
1590		return 0;
1591	}
1592
1593	err = enetc_psfp_disable(priv);
1594	if (err)
1595		return err;
1596
1597	priv->active_offloads &= ~ENETC_F_QCI;
1598
1599	return 0;
1600}
1601
1602int enetc_psfp_init(struct enetc_ndev_priv *priv)
1603{
1604	if (epsfp.psfp_sfi_bitmap)
1605		return 0;
1606
1607	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
1608					      GFP_KERNEL);
1609	if (!epsfp.psfp_sfi_bitmap)
1610		return -ENOMEM;
1611
1612	spin_lock_init(&epsfp.psfp_lock);
1613
1614	if (list_empty(&enetc_block_cb_list))
1615		epsfp.dev_bitmap = 0;
1616
1617	return 0;
1618}
1619
1620int enetc_psfp_clean(struct enetc_ndev_priv *priv)
1621{
1622	if (!list_empty(&enetc_block_cb_list))
1623		return -EBUSY;
1624
1625	clean_psfp_all();
1626
1627	return 0;
1628}
1629
1630int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
1631{
1632	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1633	struct flow_block_offload *f = type_data;
1634	int port, err;
1635
1636	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
1637					 enetc_setup_tc_block_cb,
1638					 ndev, ndev, true);
1639	if (err)
1640		return err;
1641
1642	switch (f->command) {
1643	case FLOW_BLOCK_BIND:
1644		port = enetc_pf_to_port(priv->si->pdev);
1645		if (port < 0)
1646			return -EINVAL;
1647
1648		set_bit(port, &epsfp.dev_bitmap);
1649		break;
1650	case FLOW_BLOCK_UNBIND:
1651		port = enetc_pf_to_port(priv->si->pdev);
1652		if (port < 0)
1653			return -EINVAL;
1654
1655		clear_bit(port, &epsfp.dev_bitmap);
1656		if (!epsfp.dev_bitmap)
1657			clean_psfp_all();
1658		break;
1659	}
1660
1661	return 0;
1662}
1663
1664int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
1665{
1666	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1667	struct tc_query_caps_base *base = type_data;
1668	struct enetc_si *si = priv->si;
1669
1670	switch (base->type) {
1671	case TC_SETUP_QDISC_MQPRIO: {
1672		struct tc_mqprio_caps *caps = base->caps;
1673
1674		caps->validate_queue_counts = true;
1675
1676		return 0;
1677	}
1678	case TC_SETUP_QDISC_TAPRIO: {
1679		struct tc_taprio_caps *caps = base->caps;
1680
1681		if (si->hw_features & ENETC_SI_F_QBV)
1682			caps->supports_queue_max_sdu = true;
1683
1684		return 0;
1685	}
1686	default:
1687		return -EOPNOTSUPP;
1688	}
1689}
v6.2
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/* Copyright 2019 NXP */
   3
   4#include "enetc.h"
   5
   6#include <net/pkt_sched.h>
   7#include <linux/math64.h>
   8#include <linux/refcount.h>
   9#include <net/pkt_cls.h>
  10#include <net/tc_act/tc_gate.h>
  11
  12static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
  13{
  14	return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
  15}
  16
  17void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
  18{
  19	struct enetc_hw *hw = &priv->si->hw;
  20	u32 old_speed = priv->speed;
  21	u32 pspeed, tmp;
  22
  23	if (speed == old_speed)
  24		return;
  25
  26	switch (speed) {
  27	case SPEED_1000:
  28		pspeed = ENETC_PMR_PSPEED_1000M;
  29		break;
  30	case SPEED_2500:
  31		pspeed = ENETC_PMR_PSPEED_2500M;
  32		break;
  33	case SPEED_100:
  34		pspeed = ENETC_PMR_PSPEED_100M;
  35		break;
  36	case SPEED_10:
  37	default:
  38		pspeed = ENETC_PMR_PSPEED_10M;
  39	}
  40
  41	priv->speed = speed;
  42	tmp = enetc_port_rd(hw, ENETC_PMR);
  43	enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
  44}
  45
  46static int enetc_setup_taprio(struct net_device *ndev,
  47			      struct tc_taprio_qopt_offload *admin_conf)
  48{
  49	struct enetc_ndev_priv *priv = netdev_priv(ndev);
  50	struct enetc_hw *hw = &priv->si->hw;
  51	struct enetc_cbd cbd = {.cmd = 0};
  52	struct tgs_gcl_conf *gcl_config;
  53	struct tgs_gcl_data *gcl_data;
  54	dma_addr_t dma;
  55	struct gce *gce;
  56	u16 data_size;
  57	u16 gcl_len;
  58	void *tmp;
  59	u32 tge;
  60	int err;
  61	int i;
  62
 
 
 
 
 
  63	if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
  64		return -EINVAL;
  65	gcl_len = admin_conf->num_entries;
  66
  67	tge = enetc_rd(hw, ENETC_PTGCR);
  68	if (!admin_conf->enable) {
  69		enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
  70		enetc_reset_ptcmsdur(hw);
  71
  72		priv->active_offloads &= ~ENETC_F_QBV;
  73
  74		return 0;
  75	}
  76
  77	if (admin_conf->cycle_time > U32_MAX ||
  78	    admin_conf->cycle_time_extension > U32_MAX)
  79		return -EINVAL;
  80
  81	/* Configure the (administrative) gate control list using the
  82	 * control BD descriptor.
  83	 */
  84	gcl_config = &cbd.gcl_conf;
 
  85
  86	data_size = struct_size(gcl_data, entry, gcl_len);
  87	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
  88				       &dma, (void *)&gcl_data);
  89	if (!tmp)
  90		return -ENOMEM;
  91
  92	gce = (struct gce *)(gcl_data + 1);
  93
  94	/* Set all gates open as default */
  95	gcl_config->atc = 0xff;
  96	gcl_config->acl_len = cpu_to_le16(gcl_len);
  97
  98	gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
  99	gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
 100	gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
 101	gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
 102
 103	for (i = 0; i < gcl_len; i++) {
 104		struct tc_taprio_sched_entry *temp_entry;
 105		struct gce *temp_gce = gce + i;
 106
 107		temp_entry = &admin_conf->entries[i];
 108
 109		temp_gce->gate = (u8)temp_entry->gate_mask;
 110		temp_gce->period = cpu_to_le32(temp_entry->interval);
 111	}
 112
 113	cbd.status_flags = 0;
 114
 115	cbd.cls = BDCR_CMD_PORT_GCL;
 116	cbd.status_flags = 0;
 117
 
 118	enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
 119
 120	err = enetc_send_cmd(priv->si, &cbd);
 121	if (err)
 122		enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
 123
 124	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
 125
 126	if (err)
 127		return err;
 128
 129	enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
 130	priv->active_offloads |= ENETC_F_QBV;
 131
 132	return 0;
 133}
 134
 135int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 136{
 137	struct tc_taprio_qopt_offload *taprio = type_data;
 138	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 139	struct enetc_hw *hw = &priv->si->hw;
 140	struct enetc_bdr *tx_ring;
 141	int err;
 142	int i;
 143
 144	/* TSD and Qbv are mutually exclusive in hardware */
 145	for (i = 0; i < priv->num_tx_rings; i++)
 146		if (priv->tx_ring[i]->tsd_enable)
 147			return -EBUSY;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 148
 149	for (i = 0; i < priv->num_tx_rings; i++) {
 150		tx_ring = priv->tx_ring[i];
 151		tx_ring->prio = taprio->enable ? i : 0;
 152		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
 153	}
 154
 155	err = enetc_setup_taprio(ndev, taprio);
 156	if (err) {
 157		for (i = 0; i < priv->num_tx_rings; i++) {
 158			tx_ring = priv->tx_ring[i];
 159			tx_ring->prio = taprio->enable ? 0 : i;
 160			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
 161		}
 
 
 
 
 
 
 
 
 162	}
 163
 164	return err;
 165}
 166
 167static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
 168{
 169	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
 170}
 171
 172static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
 173{
 174	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
 175}
 176
/* Offload a CBS (credit based shaper, 802.1Qav) configuration for one
 * traffic class. Only the two highest-priority TCs may be shaped, and
 * they must be enabled top-down and disabled bottom-up.
 */
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);

	/* Support highest prio and second prio tc in cbs mode */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the other TC that are numerically
		 * lower than this TC have been disabled.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disable TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	/* idleSlope - sendSlope must equal the port rate (kbps), with
	 * idleSlope >= 0 and sendSlope <= 0.
	 * NOTE(review): port_transmit_rate * 1000L is narrow arithmetic -
	 * presumably safe for the link speeds this MAC supports; confirm.
	 */
	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* Bandwidth as an integer percentage of the port rate */
	bw = cbs->idleslope / (port_transmit_rate * 10UL);

	/* Make sure the other TC that are numerically
	 * higher than this TC have been enabled.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d first before enable TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS Bandwidth can't exceed 100\n");
		return -EINVAL;
	}

	/* NOTE(review): the result of this register read is discarded -
	 * looks like a leftover (or a deliberate dummy read); confirm.
	 */
	enetc_port_rd(hw, ENETC_PTCMSDUR(tc));

	/* For top prio TC, the max_interfrence_size is maxSizedFrame.
	 *
	 * For next prio TC, the max_interfrence_size is calculated as below:
	 *
	 *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- RA: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- MA: maximum sized frame for AVB Class A
	 */

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit bits calculate by:
	 *
	 * maxSizedFrame * (idleSlope/portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

	/* hiCredit bits to hiCredit register need to calculated as:
	 *
	 * (enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);

	enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set bw register and enable this traffic class */
	enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}
 292
 293int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
 294{
 295	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 296	struct tc_etf_qopt_offload *qopt = type_data;
 297	u8 tc_nums = netdev_get_num_tc(ndev);
 298	struct enetc_hw *hw = &priv->si->hw;
 299	int tc;
 300
 301	if (!tc_nums)
 302		return -EOPNOTSUPP;
 303
 304	tc = qopt->queue;
 305
 306	if (tc < 0 || tc >= priv->num_tx_rings)
 307		return -EINVAL;
 308
 309	/* TSD and Qbv are mutually exclusive in hardware */
 310	if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
 311		return -EBUSY;
 312
 313	priv->tx_ring[tc]->tsd_enable = qopt->enable;
 314	enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
 315
 316	return 0;
 317}
 318
/* Stream identification method programmed into the stream identity
 * table (see enetc_streamid_hw_set()).
 */
enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,	/* identify by destination MAC + VLAN */
	STREAMID_TYPE_SMAC,	/* identify by source MAC + VLAN */
};

/* VLAN tag matching mode for stream identification */
enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,
	STREAMID_VLAN_UNTAGGED,
	STREAMID_VLAN_ALL,	/* tagged or untagged */
};

/* "don't care" marker for the prio/meter fields of a stream filter */
#define ENETC_PSFP_WILDCARD -1
/* Stream handles are allocated as filter index + this offset */
#define HANDLE_OFFSET 100

/* Which offload backend(s) an action/key combination maps to */
enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};

/* This is for limit output type for input actions */
struct actions_fwd {
	u64 actions;	/* bitmask of FLOW_ACTION_* ids */
	u64 keys;	/* include the must needed keys */
	enum forward_type output;
};
 347
/* Counters returned by the stream filter query command; the derived
 * fields are computed in enetc_streamcounter_hw_get().
 */
struct psfp_streamfilter_counters {
	u64 matching_frames_count;
	u64 passing_frames_count;
	u64 not_passing_frames_count;	/* dropped by the stream gate */
	u64 passing_sdu_count;
	u64 not_passing_sdu_count;	/* dropped for exceeding max SDU */
	u64 red_frames_count;		/* dropped by the flow meter */
};

/* Software image of one stream identity table entry */
struct enetc_streamid {
	u32 index;
	union {
		u8 src_mac[6];
		u8 dst_mac[6];
	};
	u8 filtertype;	/* enum streamid_type */
	u16 vid;
	u8 tagged;	/* enum streamid_vlan_tagged */
	s32 handle;
};

/* Software image of one stream filter instance; shared (refcounted)
 * between streams with identical parameters.
 */
struct enetc_psfp_filter {
	u32 index;
	s32 handle;
	s8 prio;	/* ENETC_PSFP_WILDCARD when priority is not matched */
	u32 maxsdu;
	u32 gate_id;
	s32 meter_id;	/* ENETC_PSFP_WILDCARD when no flow meter attached */
	refcount_t refcount;
	struct hlist_node node;
};

/* Software image of one stream gate instance plus its gate control list */
struct enetc_psfp_gate {
	u32 index;
	s8 init_ipv;	/* initial internal priority, from gate.prio */
	u64 basetime;
	u64 cycletime;
	u64 cycletimext;
	u32 num_entries;
	refcount_t refcount;
	struct hlist_node node;
	struct action_gate_entry entries[];
};

/* Only enable the green color frame now
 * Will add eir and ebs color blind, couple flag etc when
 * policing action add more offloading parameters
 */
struct enetc_psfp_meter {
	u32 index;
	u32 cir;	/* from police rate_bytes_ps */
	u32 cbs;	/* from police burst */
	refcount_t refcount;
	struct hlist_node node;
};

/* Stream filter flag: set when the stream has a flow meter attached */
#define ENETC_PSFP_FLAGS_FMI BIT(0)

/* One offloaded tc chain: stream identity plus the indices of the
 * shared filter/gate/meter entries it uses.
 */
struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;
	u32 sgi_index;
	u32 flags;	/* ENETC_PSFP_FLAGS_* */
	u32 fmi_index;
	struct flow_stats stats;	/* totals reported to the stack so far */
	struct hlist_node node;
};

/* Driver-wide PSFP state shared by all ENETC ports */
struct enetc_psfp {
	unsigned long dev_bitmap;	/* ports with a bound flow block */
	unsigned long *psfp_sfi_bitmap;	/* allocated stream filter indices */
	struct hlist_head stream_list;
	struct hlist_head psfp_filter_list;
	struct hlist_head psfp_gate_list;
	struct hlist_head psfp_meter_list;
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};
 425
/* Supported action/key combinations and the backend each one maps to;
 * presumably consulted by enetc_check_flow_actions() (defined elsewhere
 * in this file) from enetc_config_clsflower().
 */
static struct actions_fwd enetc_act_fwd[] = {
	{
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	{
		BIT(FLOW_ACTION_POLICE) |
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	/* example for ACL actions */
	{
		BIT(FLOW_ACTION_DROP),
		0,
		FILTER_ACTION_TYPE_ACL
	}
};

/* Driver-wide PSFP state, lazily initialized in enetc_psfp_init() */
static struct enetc_psfp epsfp = {
	.dev_bitmap = 0,
	.psfp_sfi_bitmap = NULL,
};

/* Flow blocks bound through enetc_setup_tc_psfp() */
static LIST_HEAD(enetc_block_cb_list);
 452
/* Stream Identity Entry Set Descriptor
 *
 * Program (or disable) one stream identity table entry through the
 * control BD ring. The entry is first written disabled with catch-all
 * match data, then, when @enable is set, rewritten with the real
 * DMAC/SMAC + VLAN match and the stream handle.
 */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int port;
	int err;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&si_data);
	if (!tmp)
		return -ENOMEM;

	/* First pass: broadcast DMAC / any-VID match data with the entry
	 * left disabled (si_conf->en not set).
	 * NOTE(review): vid_vidm_tg is assigned without cpu_to_le16() -
	 * looks endian-suspect on big-endian hosts; confirm.
	 */
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
			       + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Only one port supported for one entry, set itself */
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto out;

	if (!enable)
		goto out;

	/* Enable the entry overwrite again incase space flushed by hardware */
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	/* VIDM default to be 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}
 544
/* Stream Filter Instance Set Descriptor
 *
 * Program (or disable) one stream filter instance through the control
 * BD ring; when @enable is clear the config is sent with en unset,
 * which disables the entry.
 */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;
	int port;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	/* Leaving the config zeroed (en clear) disables the entry */
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		/* presumably "stream handle match" enable bit - confirm */
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = cpu_to_le32(1 << port);

	/* The priority value which may be matched against the
	 * frame’s priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
		cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}
 602
/* Query the Stream Filter Instance counters for entry @index from hardware
 * via a control BD and unpack them into @cnt.
 *
 * The hardware reports cumulative counters split into 32-bit high/low
 * halves; the "passing" values are derived from the raw match/drop counts.
 * Returns 0 on success or a negative errno.
 */
static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
				      u32 index,
				      struct psfp_streamfilter_counters *cnt)
{
	struct enetc_cbd cbd = { .cmd = 2 };
	struct sfi_counter_data *data_buf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int err;

	/* Command 2 on the stream filter class reads back the counters */
	cbd.index = cpu_to_le16((u16)index);
	cbd.cmd = 2;
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0;

	data_size = sizeof(struct sfi_counter_data);

	/* DMA buffer the hardware writes the counter snapshot into */
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&data_buf);
	if (!tmp)
		return -ENOMEM;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto exit;

	/* Each 64-bit counter is split into a high/low 32-bit pair */
	cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
				     data_buf->matchl;

	cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
				     data_buf->msdu_dropl;

	/* Frames passing the max-SDU check = matched - oversize drops */
	cnt->passing_sdu_count = cnt->matching_frames_count
				- cnt->not_passing_sdu_count;

	cnt->not_passing_frames_count =
				((u64)data_buf->stream_gate_droph << 32) +
				data_buf->stream_gate_dropl;

	/* Frames passing the gate = matched - SDU drops - gate drops */
	cnt->passing_frames_count = cnt->matching_frames_count -
				    cnt->not_passing_sdu_count -
				    cnt->not_passing_frames_count;

	cnt->red_frames_count =	((u64)data_buf->flow_meter_droph << 32)	+
				data_buf->flow_meter_dropl;

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}
 655
 656static u64 get_ptp_now(struct enetc_hw *hw)
 657{
 658	u64 now_lo, now_hi, now;
 659
 660	now_lo = enetc_rd(hw, ENETC_SICTR0);
 661	now_hi = enetc_rd(hw, ENETC_SICTR1);
 662	now = now_lo | now_hi << 32;
 663
 664	return now;
 665}
 666
 667static int get_start_ns(u64 now, u64 cycle, u64 *start)
 668{
 669	u64 n;
 670
 671	if (!cycle)
 672		return -EFAULT;
 673
 674	n = div64_u64(now, cycle);
 675
 676	*start = (n + 1) * cycle;
 677
 678	return 0;
 679}
 680
/* Stream Gate Instance Set Descriptor */
/* Program stream gate instance @sgi into hardware (or disable it when
 * @enable is false). Programming is a two-step control BD sequence:
 * command 0 writes the basic gate instance config, command 1 transfers
 * the gate control list via a DMA buffer. Returns 0 or negative errno.
 */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	void *tmp;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep open before gate list start */
	sgi_config->ocgtst = 0x80;

	/* Initial internal priority value; bit 0x8 marks it as valid */
	sgi_config->oipv = (sgi->init_ipv < 0) ?
				0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	/* Step two: command 1 transfers the admin gate control list */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	/* Hardware encodes the list length as (entries - 1) in 2 bits */
	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&sgcl_data);
	if (!tmp)
		return -ENOMEM;

	sgce = &sgcl_data->sgcl[0];

	/* Admin gate state: trigger list switch */
	sgcl_config->agtst = 0x80;

	sgcl_data->ct = sgi->cycletime;
	sgcl_data->cte = sgi->cycletimext;

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	/* Translate each tc gate entry into a hardware gate control entry */
	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		/* maxoctets is stored as a 24-bit little-endian field */
		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = from->interval;
	}

	/* If basetime is less than now, calculate start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = lower_32_bits(start);
		sgcl_data->bth = upper_32_bits(start);
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = hi;
		sgcl_data->btl = lo;
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
	return err;
}
 802
/* Program flow meter instance @fmi into hardware, or disable the entry
 * when @enable is false. Only CIR/CBS are configured; EIR/EBS and all
 * color/coupling options are left disabled. Returns 0 or negative errno.
 */
static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
				  struct enetc_psfp_meter *fmi,
				  u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct fmi_conf *fmi_config;
	u64 temp = 0;

	cbd.index = cpu_to_le16((u16)fmi->index);
	cbd.cls = BDCR_CMD_FLOW_METER;
	cbd.status_flags = 0x80;

	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	fmi_config = &cbd.fmi_conf;
	fmi_config->en = 0x80;

	/* Convert cir (bytes/s) to the hardware rate unit:
	 * cir * 8000 / 3725 — presumably the HW counts in units of
	 * 3.725 kbps; TODO confirm against the reference manual.
	 */
	if (fmi->cir) {
		temp = (u64)8000 * fmi->cir;
		temp = div_u64(temp, 3725);
	}

	fmi_config->cir = cpu_to_le32((u32)temp);
	fmi_config->cbs = cpu_to_le32(fmi->cbs);

	/* Default for eir ebs disable */
	fmi_config->eir = 0;
	fmi_config->ebs = 0;

	/* Default:
	 * mark red disable
	 * drop on yellow disable
	 * color mode disable
	 * couple flag disable
	 */
	fmi_config->conf = 0;

	return enetc_send_cmd(priv->si, &cbd);
}
 843
 844static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
 845{
 846	struct enetc_stream_filter *f;
 847
 848	hlist_for_each_entry(f, &epsfp.stream_list, node)
 849		if (f->sid.index == index)
 850			return f;
 851
 852	return NULL;
 853}
 854
 855static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
 856{
 857	struct enetc_psfp_gate *g;
 858
 859	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
 860		if (g->index == index)
 861			return g;
 862
 863	return NULL;
 864}
 865
 866static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
 867{
 868	struct enetc_psfp_filter *s;
 869
 870	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
 871		if (s->index == index)
 872			return s;
 873
 874	return NULL;
 875}
 876
 877static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
 878{
 879	struct enetc_psfp_meter *m;
 880
 881	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
 882		if (m->index == index)
 883			return m;
 884
 885	return NULL;
 886}
 887
 888static struct enetc_psfp_filter
 889	*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
 890{
 891	struct enetc_psfp_filter *s;
 892
 893	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
 894		if (s->gate_id == sfi->gate_id &&
 895		    s->prio == sfi->prio &&
 896		    s->maxsdu == sfi->maxsdu &&
 897		    s->meter_id == sfi->meter_id)
 898			return s;
 899
 900	return NULL;
 901}
 902
 903static int enetc_get_free_index(struct enetc_ndev_priv *priv)
 904{
 905	u32 max_size = priv->psfp_cap.max_psfp_filter;
 906	unsigned long index;
 907
 908	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
 909	if (index == max_size)
 910		return -1;
 911
 912	return index;
 913}
 914
 915static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
 916{
 917	struct enetc_psfp_filter *sfi;
 918	u8 z;
 919
 920	sfi = enetc_get_filter_by_index(index);
 921	WARN_ON(!sfi);
 922	z = refcount_dec_and_test(&sfi->refcount);
 923
 924	if (z) {
 925		enetc_streamfilter_hw_set(priv, sfi, false);
 926		hlist_del(&sfi->node);
 927		kfree(sfi);
 928		clear_bit(index, epsfp.psfp_sfi_bitmap);
 929	}
 930}
 931
 932static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
 933{
 934	struct enetc_psfp_gate *sgi;
 935	u8 z;
 936
 937	sgi = enetc_get_gate_by_index(index);
 938	WARN_ON(!sgi);
 939	z = refcount_dec_and_test(&sgi->refcount);
 940	if (z) {
 941		enetc_streamgate_hw_set(priv, sgi, false);
 942		hlist_del(&sgi->node);
 943		kfree(sgi);
 944	}
 945}
 946
 947static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
 948{
 949	struct enetc_psfp_meter *fmi;
 950	u8 z;
 951
 952	fmi = enetc_get_meter_by_index(index);
 953	WARN_ON(!fmi);
 954	z = refcount_dec_and_test(&fmi->refcount);
 955	if (z) {
 956		enetc_flowmeter_hw_set(priv, fmi, false);
 957		hlist_del(&fmi->node);
 958		kfree(fmi);
 959	}
 960}
 961
/* Tear down one stream filter chain: drop the references this chain
 * holds on its meter (if any), gate and filter instances, then unlink
 * and free the chain itself. Caller holds epsfp.psfp_lock when invoked
 * from the configure path.
 */
static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	/* A flow meter is only attached when the FMI flag is set */
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}
 974
/* Program a full PSFP chain (stream ID, optional filter, gate, optional
 * meter) into hardware. On any failure, previously programmed stages are
 * rolled back in reverse order so hardware is left unchanged.
 *
 * @sfi may be NULL when an identical filter instance is already
 * programmed; @fmi may be NULL when no policing is requested.
 * Returns 0 on success or the first stage's negative errno.
 */
static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

	/* Unwind in reverse programming order */
revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}
1014
1015static struct actions_fwd *enetc_check_flow_actions(u64 acts,
1016						    unsigned int inputkeys)
1017{
1018	int i;
1019
1020	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
1021		if (acts == enetc_act_fwd[i].actions &&
1022		    inputkeys & enetc_act_fwd[i].keys)
1023			return &enetc_act_fwd[i];
1024
1025	return NULL;
1026}
1027
/* Validate that a police action uses only semantics this hardware can
 * offload: drop on exceed, pipe/accept on conform, byte-based rate with
 * no peak/average rate or overhead. The check order determines which
 * extack message the user sees first. Returns 0 or -EOPNOTSUPP.
 */
static int enetc_psfp_policer_validate(const struct flow_action *action,
				       const struct flow_action_entry *act,
				       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	/* "ok" terminates the action chain, so nothing may follow it */
	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	/* Hardware meters in bytes per second, not packets per second */
	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}
1067
1068static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
1069				      struct flow_cls_offload *f)
1070{
1071	struct flow_action_entry *entryg = NULL, *entryp = NULL;
1072	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1073	struct netlink_ext_ack *extack = f->common.extack;
1074	struct enetc_stream_filter *filter, *old_filter;
1075	struct enetc_psfp_meter *fmi = NULL, *old_fmi;
1076	struct enetc_psfp_filter *sfi, *old_sfi;
1077	struct enetc_psfp_gate *sgi, *old_sgi;
1078	struct flow_action_entry *entry;
1079	struct action_gate_entry *e;
1080	u8 sfi_overwrite = 0;
1081	int entries_size;
1082	int i, err;
1083
1084	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1085		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1086		return -ENOSPC;
1087	}
1088
1089	flow_action_for_each(i, entry, &rule->action)
1090		if (entry->id == FLOW_ACTION_GATE)
1091			entryg = entry;
1092		else if (entry->id == FLOW_ACTION_POLICE)
1093			entryp = entry;
1094
1095	/* Not support without gate action */
1096	if (!entryg)
1097		return -EINVAL;
1098
1099	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1100	if (!filter)
1101		return -ENOMEM;
1102
1103	filter->sid.index = f->common.chain_index;
1104
1105	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1106		struct flow_match_eth_addrs match;
1107
1108		flow_rule_match_eth_addrs(rule, &match);
1109
1110		if (!is_zero_ether_addr(match.mask->dst) &&
1111		    !is_zero_ether_addr(match.mask->src)) {
1112			NL_SET_ERR_MSG_MOD(extack,
1113					   "Cannot match on both source and destination MAC");
1114			err = -EINVAL;
1115			goto free_filter;
1116		}
1117
1118		if (!is_zero_ether_addr(match.mask->dst)) {
1119			if (!is_broadcast_ether_addr(match.mask->dst)) {
1120				NL_SET_ERR_MSG_MOD(extack,
1121						   "Masked matching on destination MAC not supported");
1122				err = -EINVAL;
1123				goto free_filter;
1124			}
1125			ether_addr_copy(filter->sid.dst_mac, match.key->dst);
1126			filter->sid.filtertype = STREAMID_TYPE_NULL;
1127		}
1128
1129		if (!is_zero_ether_addr(match.mask->src)) {
1130			if (!is_broadcast_ether_addr(match.mask->src)) {
1131				NL_SET_ERR_MSG_MOD(extack,
1132						   "Masked matching on source MAC not supported");
1133				err = -EINVAL;
1134				goto free_filter;
1135			}
1136			ether_addr_copy(filter->sid.src_mac, match.key->src);
1137			filter->sid.filtertype = STREAMID_TYPE_SMAC;
1138		}
1139	} else {
1140		NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
1141		err = -EINVAL;
1142		goto free_filter;
1143	}
1144
1145	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1146		struct flow_match_vlan match;
1147
1148		flow_rule_match_vlan(rule, &match);
1149		if (match.mask->vlan_priority) {
1150			if (match.mask->vlan_priority !=
1151			    (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
1152				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
1153				err = -EINVAL;
1154				goto free_filter;
1155			}
1156		}
1157
1158		if (match.mask->vlan_id) {
1159			if (match.mask->vlan_id != VLAN_VID_MASK) {
1160				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
1161				err = -EINVAL;
1162				goto free_filter;
1163			}
1164
1165			filter->sid.vid = match.key->vlan_id;
1166			if (!filter->sid.vid)
1167				filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
1168			else
1169				filter->sid.tagged = STREAMID_VLAN_TAGGED;
1170		}
1171	} else {
1172		filter->sid.tagged = STREAMID_VLAN_ALL;
1173	}
1174
1175	/* parsing gate action */
1176	if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
1177		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1178		err = -ENOSPC;
1179		goto free_filter;
1180	}
1181
1182	if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
1183		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1184		err = -ENOSPC;
1185		goto free_filter;
1186	}
1187
1188	entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
1189	sgi = kzalloc(entries_size, GFP_KERNEL);
1190	if (!sgi) {
1191		err = -ENOMEM;
1192		goto free_filter;
1193	}
1194
1195	refcount_set(&sgi->refcount, 1);
1196	sgi->index = entryg->hw_index;
1197	sgi->init_ipv = entryg->gate.prio;
1198	sgi->basetime = entryg->gate.basetime;
1199	sgi->cycletime = entryg->gate.cycletime;
1200	sgi->num_entries = entryg->gate.num_entries;
1201
1202	e = sgi->entries;
1203	for (i = 0; i < entryg->gate.num_entries; i++) {
1204		e[i].gate_state = entryg->gate.entries[i].gate_state;
1205		e[i].interval = entryg->gate.entries[i].interval;
1206		e[i].ipv = entryg->gate.entries[i].ipv;
1207		e[i].maxoctets = entryg->gate.entries[i].maxoctets;
1208	}
1209
1210	filter->sgi_index = sgi->index;
1211
1212	sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
1213	if (!sfi) {
1214		err = -ENOMEM;
1215		goto free_gate;
1216	}
1217
1218	refcount_set(&sfi->refcount, 1);
1219	sfi->gate_id = sgi->index;
1220	sfi->meter_id = ENETC_PSFP_WILDCARD;
1221
1222	/* Flow meter and max frame size */
1223	if (entryp) {
1224		err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
1225		if (err)
1226			goto free_sfi;
1227
1228		if (entryp->police.burst) {
1229			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
1230			if (!fmi) {
1231				err = -ENOMEM;
1232				goto free_sfi;
1233			}
1234			refcount_set(&fmi->refcount, 1);
1235			fmi->cir = entryp->police.rate_bytes_ps;
1236			fmi->cbs = entryp->police.burst;
1237			fmi->index = entryp->hw_index;
1238			filter->flags |= ENETC_PSFP_FLAGS_FMI;
1239			filter->fmi_index = fmi->index;
1240			sfi->meter_id = fmi->index;
1241		}
1242
1243		if (entryp->police.mtu)
1244			sfi->maxsdu = entryp->police.mtu;
1245	}
1246
1247	/* prio ref the filter prio */
1248	if (f->common.prio && f->common.prio <= BIT(3))
1249		sfi->prio = f->common.prio - 1;
1250	else
1251		sfi->prio = ENETC_PSFP_WILDCARD;
1252
1253	old_sfi = enetc_psfp_check_sfi(sfi);
1254	if (!old_sfi) {
1255		int index;
1256
1257		index = enetc_get_free_index(priv);
1258		if (sfi->handle < 0) {
1259			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
1260			err = -ENOSPC;
1261			goto free_fmi;
1262		}
1263
1264		sfi->index = index;
1265		sfi->handle = index + HANDLE_OFFSET;
1266		/* Update the stream filter handle also */
1267		filter->sid.handle = sfi->handle;
1268		filter->sfi_index = sfi->index;
1269		sfi_overwrite = 0;
1270	} else {
1271		filter->sfi_index = old_sfi->index;
1272		filter->sid.handle = old_sfi->handle;
1273		sfi_overwrite = 1;
1274	}
1275
1276	err = enetc_psfp_hw_set(priv, &filter->sid,
1277				sfi_overwrite ? NULL : sfi, sgi, fmi);
1278	if (err)
1279		goto free_fmi;
1280
1281	spin_lock(&epsfp.psfp_lock);
1282	if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
1283		old_fmi = enetc_get_meter_by_index(filter->fmi_index);
1284		if (old_fmi) {
1285			fmi->refcount = old_fmi->refcount;
1286			refcount_set(&fmi->refcount,
1287				     refcount_read(&old_fmi->refcount) + 1);
1288			hlist_del(&old_fmi->node);
1289			kfree(old_fmi);
1290		}
1291		hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
1292	}
1293
1294	/* Remove the old node if exist and update with a new node */
1295	old_sgi = enetc_get_gate_by_index(filter->sgi_index);
1296	if (old_sgi) {
1297		refcount_set(&sgi->refcount,
1298			     refcount_read(&old_sgi->refcount) + 1);
1299		hlist_del(&old_sgi->node);
1300		kfree(old_sgi);
1301	}
1302
1303	hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
1304
1305	if (!old_sfi) {
1306		hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
1307		set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
1308	} else {
1309		kfree(sfi);
1310		refcount_inc(&old_sfi->refcount);
1311	}
1312
1313	old_filter = enetc_get_stream_by_index(filter->sid.index);
1314	if (old_filter)
1315		remove_one_chain(priv, old_filter);
1316
1317	filter->stats.lastused = jiffies;
1318	hlist_add_head(&filter->node, &epsfp.stream_list);
1319
1320	spin_unlock(&epsfp.psfp_lock);
1321
1322	return 0;
1323
1324free_fmi:
1325	kfree(fmi);
1326free_sfi:
1327	kfree(sfi);
1328free_gate:
1329	kfree(sgi);
1330free_filter:
1331	kfree(filter);
1332
1333	return err;
1334}
1335
1336static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
1337				  struct flow_cls_offload *cls_flower)
1338{
1339	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1340	struct netlink_ext_ack *extack = cls_flower->common.extack;
1341	struct flow_dissector *dissector = rule->match.dissector;
1342	struct flow_action *action = &rule->action;
1343	struct flow_action_entry *entry;
1344	struct actions_fwd *fwd;
1345	u64 actions = 0;
1346	int i, err;
1347
1348	if (!flow_action_has_entries(action)) {
1349		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
1350		return -EINVAL;
1351	}
1352
1353	flow_action_for_each(i, entry, action)
1354		actions |= BIT(entry->id);
1355
1356	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
1357	if (!fwd) {
1358		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
1359		return -EOPNOTSUPP;
1360	}
1361
1362	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
1363		err = enetc_psfp_parse_clsflower(priv, cls_flower);
1364		if (err) {
1365			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
1366			return err;
1367		}
1368	} else {
1369		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
1370		return -EOPNOTSUPP;
1371	}
1372
1373	return 0;
1374}
1375
1376static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
1377					struct flow_cls_offload *f)
1378{
1379	struct enetc_stream_filter *filter;
1380	struct netlink_ext_ack *extack = f->common.extack;
1381	int err;
1382
1383	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1384		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1385		return -ENOSPC;
1386	}
1387
1388	filter = enetc_get_stream_by_index(f->common.chain_index);
1389	if (!filter)
1390		return -EINVAL;
1391
1392	err = enetc_streamid_hw_set(priv, &filter->sid, false);
1393	if (err)
1394		return err;
1395
1396	remove_one_chain(priv, filter);
1397
1398	return 0;
1399}
1400
/* Destroy a flower rule; all offloaded rules are PSFP-based, so this
 * delegates straight to the PSFP teardown path.
 */
static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}
1406
/* FLOW_CLS_STATS: read the cumulative hardware counters for the rule's
 * stream filter, compute the deltas since the last query (cached totals
 * are kept in filter->stats under epsfp.psfp_lock) and report them via
 * flow_stats_update(). Returns 0 or -EINVAL.
 */
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	spin_lock(&epsfp.psfp_lock);
	/* Delta = current cumulative counter - previously reported total */
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}
1441
/* Dispatch a tc-flower offload command to the matching handler.
 * Returns -EOPNOTSUPP for commands this driver does not implement.
 */
static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
				     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return enetc_config_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return enetc_destroy_clsflower(priv, cls_flower);
	case FLOW_CLS_STATS:
		return enetc_psfp_get_stats(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
1456
1457static inline void clean_psfp_sfi_bitmap(void)
1458{
1459	bitmap_free(epsfp.psfp_sfi_bitmap);
1460	epsfp.psfp_sfi_bitmap = NULL;
1461}
1462
1463static void clean_stream_list(void)
1464{
1465	struct enetc_stream_filter *s;
1466	struct hlist_node *tmp;
1467
1468	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
1469		hlist_del(&s->node);
1470		kfree(s);
1471	}
1472}
1473
1474static void clean_sfi_list(void)
1475{
1476	struct enetc_psfp_filter *sfi;
1477	struct hlist_node *tmp;
1478
1479	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
1480		hlist_del(&sfi->node);
1481		kfree(sfi);
1482	}
1483}
1484
1485static void clean_sgi_list(void)
1486{
1487	struct enetc_psfp_gate *sgi;
1488	struct hlist_node *tmp;
1489
1490	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
1491		hlist_del(&sgi->node);
1492		kfree(sgi);
1493	}
1494}
1495
1496static void clean_psfp_all(void)
1497{
1498	/* Disable all list nodes and free all memory */
1499	clean_sfi_list();
1500	clean_sgi_list();
1501	clean_stream_list();
1502	epsfp.dev_bitmap = 0;
1503	clean_psfp_sfi_bitmap();
1504}
1505
1506int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1507			    void *cb_priv)
1508{
1509	struct net_device *ndev = cb_priv;
1510
1511	if (!tc_can_offload(ndev))
1512		return -EOPNOTSUPP;
1513
1514	switch (type) {
1515	case TC_SETUP_CLSFLOWER:
1516		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
1517	default:
1518		return -EOPNOTSUPP;
1519	}
1520}
1521
1522int enetc_set_psfp(struct net_device *ndev, bool en)
1523{
1524	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1525	int err;
1526
1527	if (en) {
1528		err = enetc_psfp_enable(priv);
1529		if (err)
1530			return err;
1531
1532		priv->active_offloads |= ENETC_F_QCI;
1533		return 0;
1534	}
1535
1536	err = enetc_psfp_disable(priv);
1537	if (err)
1538		return err;
1539
1540	priv->active_offloads &= ~ENETC_F_QCI;
1541
1542	return 0;
1543}
1544
/* One-time initialization of the driver-global PSFP state: allocate the
 * stream filter index bitmap and set up the lock. Idempotent — returns
 * immediately if the bitmap already exists. Returns 0 or -ENOMEM.
 */
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
	if (epsfp.psfp_sfi_bitmap)
		return 0;

	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
					      GFP_KERNEL);
	if (!epsfp.psfp_sfi_bitmap)
		return -ENOMEM;

	spin_lock_init(&epsfp.psfp_lock);

	/* No block callbacks bound yet means no ports are tracked */
	if (list_empty(&enetc_block_cb_list))
		epsfp.dev_bitmap = 0;

	return 0;
}
1562
/* Tear down all global PSFP state. Refuses with -EBUSY while any tc
 * block callback is still bound (another port may be using PSFP).
 */
int enetc_psfp_clean(struct enetc_ndev_priv *priv)
{
	if (!list_empty(&enetc_block_cb_list))
		return -EBUSY;

	clean_psfp_all();

	return 0;
}
1572
/* TC_SETUP_BLOCK handler: register/unregister the flower block callback
 * and track which PF ports have a block bound in epsfp.dev_bitmap. When
 * the last port unbinds, all global PSFP state is released.
 */
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct flow_block_offload *f = type_data;
	int port, err;

	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
					 enetc_setup_tc_block_cb,
					 ndev, ndev, true);
	if (err)
		return err;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		port = enetc_pf_to_port(priv->si->pdev);
		if (port < 0)
			return -EINVAL;

		set_bit(port, &epsfp.dev_bitmap);
		break;
	case FLOW_BLOCK_UNBIND:
		port = enetc_pf_to_port(priv->si->pdev);
		if (port < 0)
			return -EINVAL;

		clear_bit(port, &epsfp.dev_bitmap);
		/* Last bound port gone: release all global PSFP state */
		if (!epsfp.dev_bitmap)
			clean_psfp_all();
		break;
	}

	return 0;
}
1606
1607int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
1608{
1609	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1610	struct tc_query_caps_base *base = type_data;
1611	struct enetc_si *si = priv->si;
1612
1613	switch (base->type) {
 
 
 
 
 
 
 
1614	case TC_SETUP_QDISC_TAPRIO: {
1615		struct tc_taprio_caps *caps = base->caps;
1616
1617		if (si->hw_features & ENETC_SI_F_QBV)
1618			caps->supports_queue_max_sdu = true;
1619
1620		return 0;
1621	}
1622	default:
1623		return -EOPNOTSUPP;
1624	}
1625}