v6.13.7
   1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
   2/*
   3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
   4 * stmmac TC Handling (HW only)
   5 */
   6
   7#include <net/pkt_cls.h>
   8#include <net/tc_act/tc_gact.h>
   9#include "common.h"
  10#include "dwmac4.h"
  11#include "dwmac5.h"
  12#include "stmmac.h"
  13
  14static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
  15{
  16	memset(entry, 0, sizeof(*entry));
  17	entry->in_use = true;
  18	entry->is_last = true;
  19	entry->is_frag = false;
  20	entry->prio = ~0x0;
  21	entry->handle = 0;
  22	entry->val.match_data = 0x0;
  23	entry->val.match_en = 0x0;
  24	entry->val.af = 1;
  25	entry->val.dma_ch_no = 0x0;
  26}
  27
  28static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
  29					     struct tc_cls_u32_offload *cls,
  30					     bool free)
  31{
  32	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
  33	u32 loc = cls->knode.handle;
  34	int i;
  35
  36	for (i = 0; i < priv->tc_entries_max; i++) {
  37		entry = &priv->tc_entries[i];
  38		if (!entry->in_use && !first && free)
  39			first = entry;
  40		if ((entry->handle == loc) && !free && !entry->is_frag)
  41			dup = entry;
  42	}
  43
  44	if (dup)
  45		return dup;
  46	if (first) {
  47		first->handle = loc;
  48		first->in_use = true;
  49
  50		/* Reset HW values */
  51		memset(&first->val, 0, sizeof(first->val));
  52	}
  53
  54	return first;
  55}
  56
  57static int tc_fill_actions(struct stmmac_tc_entry *entry,
  58			   struct stmmac_tc_entry *frag,
  59			   struct tc_cls_u32_offload *cls)
  60{
  61	struct stmmac_tc_entry *action_entry = entry;
  62	const struct tc_action *act;
  63	struct tcf_exts *exts;
  64	int i;
  65
  66	exts = cls->knode.exts;
  67	if (!tcf_exts_has_actions(exts))
  68		return -EINVAL;
  69	if (frag)
  70		action_entry = frag;
  71
  72	tcf_exts_for_each_action(i, act, exts) {
  73		/* Accept */
  74		if (is_tcf_gact_ok(act)) {
  75			action_entry->val.af = 1;
  76			break;
  77		}
  78		/* Drop */
  79		if (is_tcf_gact_shot(act)) {
  80			action_entry->val.rf = 1;
  81			break;
  82		}
  83
  84		/* Unsupported */
  85		return -EINVAL;
  86	}
  87
  88	return 0;
  89}
  90
  91static int tc_fill_entry(struct stmmac_priv *priv,
  92			 struct tc_cls_u32_offload *cls)
  93{
  94	struct stmmac_tc_entry *entry, *frag = NULL;
  95	struct tc_u32_sel *sel = cls->knode.sel;
  96	u32 off, data, mask, real_off, rem;
  97	u32 prio = cls->common.prio << 16;
  98	int ret;
  99
 100	/* Only 1 match per entry */
 101	if (sel->nkeys <= 0 || sel->nkeys > 1)
 102		return -EINVAL;
 103
 104	off = sel->keys[0].off << sel->offshift;
 105	data = sel->keys[0].val;
 106	mask = sel->keys[0].mask;
 107
 108	switch (ntohs(cls->common.protocol)) {
 109	case ETH_P_ALL:
 110		break;
 111	case ETH_P_IP:
 112		off += ETH_HLEN;
 113		break;
 114	default:
 115		return -EINVAL;
 116	}
 117
 118	if (off > priv->tc_off_max)
 119		return -EINVAL;
 120
 121	real_off = off / 4;
 122	rem = off % 4;
 123
 124	entry = tc_find_entry(priv, cls, true);
 125	if (!entry)
 126		return -EINVAL;
 127
 128	if (rem) {
 129		frag = tc_find_entry(priv, cls, true);
 130		if (!frag) {
 131			ret = -EINVAL;
 132			goto err_unuse;
 133		}
 134
 135		entry->frag_ptr = frag;
 136		entry->val.match_en = (mask << (rem * 8)) &
 137			GENMASK(31, rem * 8);
 138		entry->val.match_data = (data << (rem * 8)) &
 139			GENMASK(31, rem * 8);
 140		entry->val.frame_offset = real_off;
 141		entry->prio = prio;
 142
 143		frag->val.match_en = (mask >> (rem * 8)) &
 144			GENMASK(rem * 8 - 1, 0);
 145		frag->val.match_data = (data >> (rem * 8)) &
 146			GENMASK(rem * 8 - 1, 0);
 147		frag->val.frame_offset = real_off + 1;
 148		frag->prio = prio;
 149		frag->is_frag = true;
 150	} else {
 151		entry->frag_ptr = NULL;
 152		entry->val.match_en = mask;
 153		entry->val.match_data = data;
 154		entry->val.frame_offset = real_off;
 155		entry->prio = prio;
 156	}
 157
 158	ret = tc_fill_actions(entry, frag, cls);
 159	if (ret)
 160		goto err_unuse;
 161
 162	return 0;
 163
 164err_unuse:
 165	if (frag)
 166		frag->in_use = false;
 167	entry->in_use = false;
 168	return ret;
 169}
 170
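/*
 * Illustrative note (not part of the original source): the parser
 * matches on 4-byte frame words, so an unaligned key is split in
 * tc_fill_entry() above. For example, a key at byte offset 17 gives
 * real_off = 17 / 4 = 4 and rem = 1; the primary entry carries the
 * portion of the key that falls inside word 4 and the "frag" entry the
 * remainder in word 5, both with the same prio so the hardware applies
 * them as a single rule.
 */
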
 171static void tc_unfill_entry(struct stmmac_priv *priv,
 172			    struct tc_cls_u32_offload *cls)
 173{
 174	struct stmmac_tc_entry *entry;
 175
 176	entry = tc_find_entry(priv, cls, false);
 177	if (!entry)
 178		return;
 179
 180	entry->in_use = false;
 181	if (entry->frag_ptr) {
 182		entry = entry->frag_ptr;
 183		entry->is_frag = false;
 184		entry->in_use = false;
 185	}
 186}
 187
 188static int tc_config_knode(struct stmmac_priv *priv,
 189			   struct tc_cls_u32_offload *cls)
 190{
 191	int ret;
 192
 193	ret = tc_fill_entry(priv, cls);
 194	if (ret)
 195		return ret;
 196
 197	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
 198			priv->tc_entries_max);
 199	if (ret)
 200		goto err_unfill;
 201
 202	return 0;
 203
 204err_unfill:
 205	tc_unfill_entry(priv, cls);
 206	return ret;
 207}
 208
 209static int tc_delete_knode(struct stmmac_priv *priv,
 210			   struct tc_cls_u32_offload *cls)
 211{
 212	/* Set entry and fragments as not used */
 213	tc_unfill_entry(priv, cls);
 214
 215	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
 216				 priv->tc_entries_max);
 217}
 218
 219static int tc_setup_cls_u32(struct stmmac_priv *priv,
 220			    struct tc_cls_u32_offload *cls)
 221{
 222	switch (cls->command) {
 223	case TC_CLSU32_REPLACE_KNODE:
 224		tc_unfill_entry(priv, cls);
 225		fallthrough;
 226	case TC_CLSU32_NEW_KNODE:
 227		return tc_config_knode(priv, cls);
 228	case TC_CLSU32_DELETE_KNODE:
 229		return tc_delete_knode(priv, cls);
 230	default:
 231		return -EOPNOTSUPP;
 232	}
 233}
 234
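/*
 * Example usage (illustrative, assuming an interface named eth0): the
 * handlers above are reached through ndo_setup_tc() when a cls_u32
 * filter is installed with the software fallback disabled, e.g.:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip \
 *           u32 match ip dst 192.168.0.1/32 skip_sw action drop
 *
 * Only single-key matches are accepted (see tc_fill_entry() above).
 */
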
 235static int tc_rfs_init(struct stmmac_priv *priv)
 236{
 237	int i;
 238
 239	priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
 240	priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
 241	priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
 242
 243	for (i = 0; i < STMMAC_RFS_T_MAX; i++)
 244		priv->rfs_entries_total += priv->rfs_entries_max[i];
 245
 246	priv->rfs_entries = devm_kcalloc(priv->device,
 247					 priv->rfs_entries_total,
 248					 sizeof(*priv->rfs_entries),
 249					 GFP_KERNEL);
 250	if (!priv->rfs_entries)
 251		return -ENOMEM;
 252
 253	dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
 254		 priv->rfs_entries_total);
 255
 256	return 0;
 257}
 258
 259static int tc_init(struct stmmac_priv *priv)
 260{
 261	struct dma_features *dma_cap = &priv->dma_cap;
 262	unsigned int count;
 263	int ret, i;
 264
 265	if (dma_cap->l3l4fnum) {
 266		priv->flow_entries_max = dma_cap->l3l4fnum;
 267		priv->flow_entries = devm_kcalloc(priv->device,
 268						  dma_cap->l3l4fnum,
 269						  sizeof(*priv->flow_entries),
 270						  GFP_KERNEL);
 271		if (!priv->flow_entries)
 272			return -ENOMEM;
 273
 274		for (i = 0; i < priv->flow_entries_max; i++)
 275			priv->flow_entries[i].idx = i;
 276
 277		dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
 278			 priv->flow_entries_max);
 279	}
 280
 281	ret = tc_rfs_init(priv);
 282	if (ret)
 283		return -ENOMEM;
 284
 285	/* Fail silently as we can still use remaining features, e.g. CBS */
 286	if (!dma_cap->frpsel)
 287		return 0;
 288
 289	switch (dma_cap->frpbs) {
 290	case 0x0:
 291		priv->tc_off_max = 64;
 292		break;
 293	case 0x1:
 294		priv->tc_off_max = 128;
 295		break;
 296	case 0x2:
 297		priv->tc_off_max = 256;
 298		break;
 299	default:
 300		return -EINVAL;
 301	}
 302
 303	switch (dma_cap->frpes) {
 304	case 0x0:
 305		count = 64;
 306		break;
 307	case 0x1:
 308		count = 128;
 309		break;
 310	case 0x2:
 311		count = 256;
 312		break;
 313	default:
 314		return -EINVAL;
 315	}
 316
 317	/* Reserve one last filter which lets all pass */
 318	priv->tc_entries_max = count;
 319	priv->tc_entries = devm_kcalloc(priv->device,
 320			count, sizeof(*priv->tc_entries), GFP_KERNEL);
 321	if (!priv->tc_entries)
 322		return -ENOMEM;
 323
 324	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
 325
 326	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
 327			priv->tc_entries_max, priv->tc_off_max);
 328
 329	return 0;
 330}
 331
 332static int tc_setup_cbs(struct stmmac_priv *priv,
 333			struct tc_cbs_qopt_offload *qopt)
 334{
 335	u32 tx_queues_count = priv->plat->tx_queues_to_use;
 336	s64 port_transmit_rate_kbps;
 337	u32 queue = qopt->queue;
 338	u32 mode_to_use;
 339	u64 value;
 340	u32 ptr;
 341	int ret;
 342
 343	/* Queue 0 is not AVB capable */
 344	if (queue <= 0 || queue >= tx_queues_count)
 345		return -EINVAL;
 346	if (!priv->dma_cap.av)
 347		return -EOPNOTSUPP;
 348
 349	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
 350
 351	if (qopt->enable) {
 352		/* Port Transmit Rate and Speed Divider */
 353		switch (div_s64(port_transmit_rate_kbps, 1000)) {
 354		case SPEED_10000:
 355		case SPEED_5000:
 356			ptr = 32;
 357			break;
 358		case SPEED_2500:
 359		case SPEED_1000:
 360			ptr = 8;
 361			break;
 362		case SPEED_100:
 363			ptr = 4;
 364			break;
 365		default:
 366			netdev_err(priv->dev,
 367				   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
 368				   port_transmit_rate_kbps);
 369			return -EINVAL;
 370		}
 371	} else {
 372		ptr = 0;
 373	}
 374
 375	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
 376	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
 377		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
 378		if (ret)
 379			return ret;
 380
 381		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
 382	} else if (!qopt->enable) {
 383		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
 384				       MTL_QUEUE_DCB);
 385		if (ret)
 386			return ret;
 387
 388		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
 389		return 0;
 390	}
 391
 392	/* Final adjustments for HW */
 393	value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
 394	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
 395
 396	value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
 397	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
 398
 399	value = qopt->hicredit * 1024ll * 8;
 400	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
 401
 402	value = qopt->locredit * 1024ll * 8;
 403	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
 404
 405	ret = stmmac_config_cbs(priv, priv->hw,
 406				priv->plat->tx_queues_cfg[queue].send_slope,
 407				priv->plat->tx_queues_cfg[queue].idle_slope,
 408				priv->plat->tx_queues_cfg[queue].high_credit,
 409				priv->plat->tx_queues_cfg[queue].low_credit,
 410				queue);
 411	if (ret)
 412		return ret;
 413
 414	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
 415			queue, qopt->sendslope, qopt->idleslope,
 416			qopt->hicredit, qopt->locredit);
 417	return 0;
 418}
 419
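/*
 * Example usage (illustrative, assuming eth0 and an mqprio/taprio root
 * with handle 100, so parent 100:2 selects hardware queue 1):
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *           idleslope 20000 sendslope -980000 \
 *           hicredit 30 locredit -1470 offload 1
 *
 * Here idleslope - sendslope = 1000000 kbps, i.e. a 1 Gb/s port, so
 * ptr = 8 and the programmed idle_slope value is
 * 20000 * 1024 * 8 / 1000000 = 163 (truncated by div_s64()).
 */
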
 420static int tc_parse_flow_actions(struct stmmac_priv *priv,
 421				 struct flow_action *action,
 422				 struct stmmac_flow_entry *entry,
 423				 struct netlink_ext_ack *extack)
 424{
 425	struct flow_action_entry *act;
 426	int i;
 427
 428	if (!flow_action_has_entries(action))
 429		return -EINVAL;
 430
 431	if (!flow_action_basic_hw_stats_check(action, extack))
 432		return -EOPNOTSUPP;
 433
 434	flow_action_for_each(i, act, action) {
 435		switch (act->id) {
 436		case FLOW_ACTION_DROP:
 437			entry->action |= STMMAC_FLOW_ACTION_DROP;
 438			return 0;
 439		default:
 440			break;
 441		}
 442	}
 443
 444	/* Nothing to do, maybe inverse filter ? */
 445	return 0;
 446}
 447
 448#define ETHER_TYPE_FULL_MASK	cpu_to_be16(~0)
 449
 450static int tc_add_basic_flow(struct stmmac_priv *priv,
 451			     struct flow_cls_offload *cls,
 452			     struct stmmac_flow_entry *entry)
 453{
 454	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 455	struct flow_dissector *dissector = rule->match.dissector;
 456	struct flow_match_basic match;
 457
 458	/* Nothing to do here */
 459	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
 460		return -EINVAL;
 461
 462	flow_rule_match_basic(rule, &match);
 463
 464	entry->ip_proto = match.key->ip_proto;
 465	return 0;
 466}
 467
 468static int tc_add_ip4_flow(struct stmmac_priv *priv,
 469			   struct flow_cls_offload *cls,
 470			   struct stmmac_flow_entry *entry)
 471{
 472	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 473	struct flow_dissector *dissector = rule->match.dissector;
 474	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
 475	struct flow_match_ipv4_addrs match;
 476	u32 hw_match;
 477	int ret;
 478
 479	/* Nothing to do here */
 480	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
 481		return -EINVAL;
 482
 483	flow_rule_match_ipv4_addrs(rule, &match);
 484	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
 485	if (hw_match) {
 486		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
 487					      false, true, inv, hw_match);
 488		if (ret)
 489			return ret;
 490	}
 491
 492	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
 493	if (hw_match) {
 494		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
 495					      false, false, inv, hw_match);
 496		if (ret)
 497			return ret;
 498	}
 499
 500	return 0;
 501}
 502
 503static int tc_add_ports_flow(struct stmmac_priv *priv,
 504			     struct flow_cls_offload *cls,
 505			     struct stmmac_flow_entry *entry)
 506{
 507	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 508	struct flow_dissector *dissector = rule->match.dissector;
 509	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
 510	struct flow_match_ports match;
 511	u32 hw_match;
 512	bool is_udp;
 513	int ret;
 514
 515	/* Nothing to do here */
 516	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
 517		return -EINVAL;
 518
 519	switch (entry->ip_proto) {
 520	case IPPROTO_TCP:
 521		is_udp = false;
 522		break;
 523	case IPPROTO_UDP:
 524		is_udp = true;
 525		break;
 526	default:
 527		return -EINVAL;
 528	}
 529
 530	flow_rule_match_ports(rule, &match);
 531
 532	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
 533	if (hw_match) {
 534		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
 535					      is_udp, true, inv, hw_match);
 536		if (ret)
 537			return ret;
 538	}
 539
 540	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
 541	if (hw_match) {
 542		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
 543					      is_udp, false, inv, hw_match);
 544		if (ret)
 545			return ret;
 546	}
 547
 548	entry->is_l4 = true;
 549	return 0;
 550}
 551
 552static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
 553					      struct flow_cls_offload *cls,
 554					      bool get_free)
 555{
 556	int i;
 557
 558	for (i = 0; i < priv->flow_entries_max; i++) {
 559		struct stmmac_flow_entry *entry = &priv->flow_entries[i];
 560
 561		if (entry->cookie == cls->cookie)
 562			return entry;
 563		if (get_free && (entry->in_use == false))
 564			return entry;
 565	}
 566
 567	return NULL;
 568}
 569
 570static struct {
 571	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
 572		  struct stmmac_flow_entry *entry);
 573} tc_flow_parsers[] = {
 574	{ .fn = tc_add_basic_flow },
 575	{ .fn = tc_add_ip4_flow },
 576	{ .fn = tc_add_ports_flow },
 577};
 578
 579static int tc_add_flow(struct stmmac_priv *priv,
 580		       struct flow_cls_offload *cls)
 581{
 582	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
 583	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 584	int i, ret;
 585
 586	if (!entry) {
 587		entry = tc_find_flow(priv, cls, true);
 588		if (!entry)
 589			return -ENOENT;
 590	}
 591
 592	ret = tc_parse_flow_actions(priv, &rule->action, entry,
 593				    cls->common.extack);
 594	if (ret)
 595		return ret;
 596
 597	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
 598		ret = tc_flow_parsers[i].fn(priv, cls, entry);
 599		if (!ret)
 600			entry->in_use = true;
 601	}
 602
 603	if (!entry->in_use)
 604		return -EINVAL;
 605
 606	entry->cookie = cls->cookie;
 607	return 0;
 608}
 609
 610static int tc_del_flow(struct stmmac_priv *priv,
 611		       struct flow_cls_offload *cls)
 612{
 613	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
 614	int ret;
 615
 616	if (!entry || !entry->in_use)
 617		return -ENOENT;
 618
 619	if (entry->is_l4) {
 620		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
 621					      false, false, false, 0);
 622	} else {
 623		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
 624					      false, false, false, 0);
 625	}
 626
 627	entry->in_use = false;
 628	entry->cookie = 0;
 629	entry->is_l4 = false;
 630	return ret;
 631}
 632
 633static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
 634					    struct flow_cls_offload *cls,
 635					    bool get_free)
 636{
 637	int i;
 638
 639	for (i = 0; i < priv->rfs_entries_total; i++) {
 640		struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
 641
 642		if (entry->cookie == cls->cookie)
 643			return entry;
 644		if (get_free && entry->in_use == false)
 645			return entry;
 646	}
 647
 648	return NULL;
 649}
 650
 651#define VLAN_PRIO_FULL_MASK (0x07)
 652
 653static int tc_add_vlan_flow(struct stmmac_priv *priv,
 654			    struct flow_cls_offload *cls)
 655{
 656	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 657	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 658	struct flow_dissector *dissector = rule->match.dissector;
 659	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
 660	struct flow_match_vlan match;
 661
 662	if (!entry) {
 663		entry = tc_find_rfs(priv, cls, true);
 664		if (!entry)
 665			return -ENOENT;
 666	}
 667
 668	if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
 669	    priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
 670		return -ENOENT;
 671
 672	/* Nothing to do here */
 673	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
 674		return -EINVAL;
 675
 676	if (tc < 0) {
 677		netdev_err(priv->dev, "Invalid traffic class\n");
 678		return -EINVAL;
 679	}
 680
 681	flow_rule_match_vlan(rule, &match);
 682
 683	if (match.mask->vlan_priority) {
 684		u32 prio;
 685
 686		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
 687			netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
 688			return -EINVAL;
 689		}
 690
 691		prio = BIT(match.key->vlan_priority);
 692		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
 693
 694		entry->in_use = true;
 695		entry->cookie = cls->cookie;
 696		entry->tc = tc;
 697		entry->type = STMMAC_RFS_T_VLAN;
 698		priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
 699	}
 700
 701	return 0;
 702}
 703
 704static int tc_del_vlan_flow(struct stmmac_priv *priv,
 705			    struct flow_cls_offload *cls)
 706{
 707	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 708
 709	if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
 710		return -ENOENT;
 711
 712	stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
 713
 714	entry->in_use = false;
 715	entry->cookie = 0;
 716	entry->tc = 0;
 717	entry->type = 0;
 718
 719	priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
 720
 721	return 0;
 722}
 723
 724static int tc_add_ethtype_flow(struct stmmac_priv *priv,
 725			       struct flow_cls_offload *cls)
 726{
 727	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 728	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 729	struct flow_dissector *dissector = rule->match.dissector;
 730	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
 731	struct flow_match_basic match;
 732
 733	if (!entry) {
 734		entry = tc_find_rfs(priv, cls, true);
 735		if (!entry)
 736			return -ENOENT;
 737	}
 738
 739	/* Nothing to do here */
 740	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
 741		return -EINVAL;
 742
 743	if (tc < 0) {
 744		netdev_err(priv->dev, "Invalid traffic class\n");
 745		return -EINVAL;
 746	}
 747
 748	flow_rule_match_basic(rule, &match);
 749
 750	if (match.mask->n_proto) {
 751		u16 etype = ntohs(match.key->n_proto);
 752
 753		if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
 754			netdev_err(priv->dev, "Only full mask is supported for EthType filter");
 755			return -EINVAL;
 756		}
 757		switch (etype) {
 758		case ETH_P_LLDP:
 759			if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
 760			    priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
 761				return -ENOENT;
 762
 763			entry->type = STMMAC_RFS_T_LLDP;
 764			priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
 765
 766			stmmac_rx_queue_routing(priv, priv->hw,
 767						PACKET_DCBCPQ, tc);
 768			break;
 769		case ETH_P_1588:
 770			if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
 771			    priv->rfs_entries_max[STMMAC_RFS_T_1588])
 772				return -ENOENT;
 773
 774			entry->type = STMMAC_RFS_T_1588;
 775			priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
 776
 777			stmmac_rx_queue_routing(priv, priv->hw,
 778						PACKET_PTPQ, tc);
 779			break;
 780		default:
 781			netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
 782			return -EINVAL;
 783		}
 784
 785		entry->in_use = true;
 786		entry->cookie = cls->cookie;
 787		entry->tc = tc;
 788		entry->etype = etype;
 789
 790		return 0;
 791	}
 792
 793	return -EINVAL;
 794}
 795
 796static int tc_del_ethtype_flow(struct stmmac_priv *priv,
 797			       struct flow_cls_offload *cls)
 798{
 799	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 800
 801	if (!entry || !entry->in_use ||
 802	    entry->type < STMMAC_RFS_T_LLDP ||
 803	    entry->type > STMMAC_RFS_T_1588)
 804		return -ENOENT;
 805
 806	switch (entry->etype) {
 807	case ETH_P_LLDP:
 808		stmmac_rx_queue_routing(priv, priv->hw,
 809					PACKET_DCBCPQ, 0);
 810		priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
 811		break;
 812	case ETH_P_1588:
 813		stmmac_rx_queue_routing(priv, priv->hw,
 814					PACKET_PTPQ, 0);
 815		priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
 816		break;
 817	default:
 818		netdev_err(priv->dev, "EthType(0x%x) is not supported",
 819			   entry->etype);
 820		return -EINVAL;
 821	}
 822
 823	entry->in_use = false;
 824	entry->cookie = 0;
 825	entry->tc = 0;
 826	entry->etype = 0;
 827	entry->type = 0;
 828
 829	return 0;
 830}
 831
 832static int tc_add_flow_cls(struct stmmac_priv *priv,
 833			   struct flow_cls_offload *cls)
 834{
 835	int ret;
 836
 837	ret = tc_add_flow(priv, cls);
 838	if (!ret)
 839		return ret;
 840
 841	ret = tc_add_ethtype_flow(priv, cls);
 842	if (!ret)
 843		return ret;
 844
 845	return tc_add_vlan_flow(priv, cls);
 846}
 847
 848static int tc_del_flow_cls(struct stmmac_priv *priv,
 849			   struct flow_cls_offload *cls)
 850{
 851	int ret;
 852
 853	ret = tc_del_flow(priv, cls);
 854	if (!ret)
 855		return ret;
 856
 857	ret = tc_del_ethtype_flow(priv, cls);
 858	if (!ret)
 859		return ret;
 860
 861	return tc_del_vlan_flow(priv, cls);
 862}
 863
 864static int tc_setup_cls(struct stmmac_priv *priv,
 865			struct flow_cls_offload *cls)
 866{
 867	int ret = 0;
 868
 869	/* When RSS is enabled, the filtering will be bypassed */
 870	if (priv->rss.enable)
 871		return -EBUSY;
 872
 873	switch (cls->command) {
 874	case FLOW_CLS_REPLACE:
 875		ret = tc_add_flow_cls(priv, cls);
 876		break;
 877	case FLOW_CLS_DESTROY:
 878		ret = tc_del_flow_cls(priv, cls);
 879		break;
 880	default:
 881		return -EOPNOTSUPP;
 882	}
 883
 884	return ret;
 885}
 886
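/*
 * Example usage (illustrative, assuming eth0): FLOW_CLS_REPLACE first
 * tries the L3/L4 flow entries, then the EthType and VLAN RFS entries,
 * e.g. to drop PTP event traffic in hardware:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto udp dst_port 319 skip_sw action drop
 *
 * Note that tc_setup_cls() rejects the offload with -EBUSY while RSS
 * is enabled.
 */
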
 887struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
 888					   ktime_t current_time,
 889					   u64 cycle_time)
 890{
 891	struct timespec64 time;
 892
 893	if (ktime_after(old_base_time, current_time)) {
 894		time = ktime_to_timespec64(old_base_time);
 895	} else {
 896		s64 n;
 897		ktime_t base_time;
 898
 899		n = div64_s64(ktime_sub_ns(current_time, old_base_time),
 900			      cycle_time);
 901		base_time = ktime_add_ns(old_base_time,
 902					 (n + 1) * cycle_time);
 903
 904		time = ktime_to_timespec64(base_time);
 905	}
 906
 907	return time;
 908}
 909
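/*
 * Illustrative example (not part of the original source): with
 * old_base_time = 100 ns, current_time = 1050 ns and
 * cycle_time = 200 ns, n = (1050 - 100) / 200 = 4, so the function
 * returns 100 + 5 * 200 = 1100 ns: the first cycle boundary after the
 * current time. A base time already in the future is returned as-is.
 */
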
 910static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
 911				     struct tc_taprio_qopt_offload *qopt)
 912{
 913	u32 num_tc = qopt->mqprio.qopt.num_tc;
 914	u32 offset, count, i, j;
 915
 916	/* QueueMaxSDU received from the driver corresponds to the Linux traffic
 917	 * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
 918	 */
 919	for (i = 0; i < num_tc; i++) {
 920		if (!qopt->max_sdu[i])
 921			continue;
 922
 923		offset = qopt->mqprio.qopt.offset[i];
 924		count = qopt->mqprio.qopt.count[i];
 925
 926		for (j = offset; j < offset + count; j++)
 927			priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
 928	}
 929}
 930
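/*
 * Illustrative note (not part of the original source): with
 * ETH_HLEN = 14 and ETH_TLEN = 2, a queueMaxSDU of 1500 is stored as
 * 1500 + 14 - 2 = 1512, i.e. the MAC header minus the EtherType field
 * is added so the limit can be compared against full frame lengths.
 */
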
 931static int tc_taprio_configure(struct stmmac_priv *priv,
 932			       struct tc_taprio_qopt_offload *qopt)
 933{
 934	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
 935	struct netlink_ext_ack *extack = qopt->mqprio.extack;
 936	struct timespec64 time, current_time, qopt_time;
 937	ktime_t current_time_ns;
 938	int i, ret = 0;
 939	u64 ctr;
 940
 941	if (qopt->base_time < 0)
 942		return -ERANGE;
 943
 944	if (!priv->dma_cap.estsel)
 945		return -EOPNOTSUPP;
 946
 947	switch (wid) {
 948	case 0x1:
 949		wid = 16;
 950		break;
 951	case 0x2:
 952		wid = 20;
 953		break;
 954	case 0x3:
 955		wid = 24;
 956		break;
 957	default:
 958		return -EOPNOTSUPP;
 959	}
 960
 961	switch (dep) {
 962	case 0x1:
 963		dep = 64;
 964		break;
 965	case 0x2:
 966		dep = 128;
 967		break;
 968	case 0x3:
 969		dep = 256;
 970		break;
 971	case 0x4:
 972		dep = 512;
 973		break;
 974	case 0x5:
 975		dep = 1024;
 976		break;
 977	default:
 978		return -EOPNOTSUPP;
 979	}
 980
 981	if (qopt->cmd == TAPRIO_CMD_DESTROY)
 982		goto disable;
 983
 984	if (qopt->num_entries >= dep)
 985		return -EINVAL;
 986	if (!qopt->cycle_time)
 987		return -ERANGE;
 988	if (qopt->cycle_time_extension >= BIT(wid + 7))
 989		return -ERANGE;
 990
 991	if (!priv->est) {
 992		priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
 993					 GFP_KERNEL);
 994		if (!priv->est)
 995			return -ENOMEM;
 996
 997		mutex_init(&priv->est_lock);
 998	} else {
 999		mutex_lock(&priv->est_lock);
1000		memset(priv->est, 0, sizeof(*priv->est));
1001		mutex_unlock(&priv->est_lock);
1002	}
1003
1004	size = qopt->num_entries;
1005
1006	mutex_lock(&priv->est_lock);
1007	priv->est->gcl_size = size;
1008	priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
1009	mutex_unlock(&priv->est_lock);
1010
1011	for (i = 0; i < size; i++) {
1012		s64 delta_ns = qopt->entries[i].interval;
1013		u32 gates = qopt->entries[i].gate_mask;
1014
1015		if (delta_ns > GENMASK(wid, 0))
1016			return -ERANGE;
1017		if (gates > GENMASK(31 - wid, 0))
1018			return -ERANGE;
1019
1020		switch (qopt->entries[i].command) {
1021		case TC_TAPRIO_CMD_SET_GATES:
1022			break;
1023		case TC_TAPRIO_CMD_SET_AND_HOLD:
1024			gates |= BIT(0);
1025			break;
1026		case TC_TAPRIO_CMD_SET_AND_RELEASE:
1027			gates &= ~BIT(0);
1028			break;
1029		default:
1030			return -EOPNOTSUPP;
1031		}
1032
1033		priv->est->gcl[i] = delta_ns | (gates << wid);
1034	}
1035
1036	mutex_lock(&priv->est_lock);
1037	/* Adjust for real system time */
1038	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
1039	current_time_ns = timespec64_to_ktime(current_time);
1040	time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
1041					qopt->cycle_time);
1042
1043	priv->est->btr[0] = (u32)time.tv_nsec;
1044	priv->est->btr[1] = (u32)time.tv_sec;
1045
1046	qopt_time = ktime_to_timespec64(qopt->base_time);
1047	priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
1048	priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
1049
1050	ctr = qopt->cycle_time;
1051	priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
1052	priv->est->ctr[1] = (u32)ctr;
1053
1054	priv->est->ter = qopt->cycle_time_extension;
1055
1056	tc_taprio_map_maxsdu_txq(priv, qopt);
1057
1058	ret = stmmac_est_configure(priv, priv, priv->est,
1059				   priv->plat->clk_ptp_rate);
1060	mutex_unlock(&priv->est_lock);
1061	if (ret) {
1062		netdev_err(priv->dev, "failed to configure EST\n");
1063		goto disable;
1064	}
1065
1066	ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
1067					      qopt->mqprio.preemptible_tcs);
1068	if (ret)
1069		goto disable;
1070
1071	return 0;
1072
1073disable:
1074	if (priv->est) {
1075		mutex_lock(&priv->est_lock);
1076		priv->est->enable = false;
1077		stmmac_est_configure(priv, priv, priv->est,
1078				     priv->plat->clk_ptp_rate);
1079		/* Reset taprio status */
1080		for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
1081			priv->xstats.max_sdu_txq_drop[i] = 0;
1082			priv->xstats.mtl_est_txq_hlbf[i] = 0;
1083		}
1084		mutex_unlock(&priv->est_lock);
1085	}
1086
1087	stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
1088
1089	return ret;
1090}
1091
1092static void tc_taprio_stats(struct stmmac_priv *priv,
1093			    struct tc_taprio_qopt_offload *qopt)
1094{
1095	u64 window_drops = 0;
1096	int i = 0;
1097
1098	for (i = 0; i < priv->plat->tx_queues_to_use; i++)
1099		window_drops += priv->xstats.max_sdu_txq_drop[i] +
1100				priv->xstats.mtl_est_txq_hlbf[i];
1101	qopt->stats.window_drops = window_drops;
1102
1103	/* Transmission overrun doesn't happen for stmmac, hence always 0 */
1104	qopt->stats.tx_overruns = 0;
1105}
1106
1107static void tc_taprio_queue_stats(struct stmmac_priv *priv,
1108				  struct tc_taprio_qopt_offload *qopt)
1109{
1110	struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
1111	int queue = qopt->queue_stats.queue;
1112
1113	q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
1114				      priv->xstats.mtl_est_txq_hlbf[queue];
1115
1116	/* Transmission overrun doesn't happen for stmmac, hence always 0 */
1117	q_stats->stats.tx_overruns = 0;
1118}
1119
1120static int tc_setup_taprio(struct stmmac_priv *priv,
1121			   struct tc_taprio_qopt_offload *qopt)
1122{
1123	int err = 0;
1124
1125	switch (qopt->cmd) {
1126	case TAPRIO_CMD_REPLACE:
1127	case TAPRIO_CMD_DESTROY:
1128		err = tc_taprio_configure(priv, qopt);
1129		break;
1130	case TAPRIO_CMD_STATS:
1131		tc_taprio_stats(priv, qopt);
1132		break;
1133	case TAPRIO_CMD_QUEUE_STATS:
1134		tc_taprio_queue_stats(priv, qopt);
1135		break;
1136	default:
1137		err = -EOPNOTSUPP;
1138	}
1139
1140	return err;
1141}
1142
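/*
 * Example usage (illustrative, assuming eth0 with three traffic
 * classes; flags 0x2 requests full hardware offload, which ends up in
 * tc_taprio_configure() via TAPRIO_CMD_REPLACE):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 \
 *           base-time 1528743495910289987 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           flags 0x2
 */
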
1143static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
1144				       struct tc_taprio_qopt_offload *qopt)
1145{
1146	if (!qopt->mqprio.preemptible_tcs)
1147		return tc_setup_taprio(priv, qopt);
1148
1149	NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
1150			   "taprio with FPE is not implemented for this MAC");
1151
1152	return -EOPNOTSUPP;
1153}
1154
1155static int tc_setup_etf(struct stmmac_priv *priv,
1156			struct tc_etf_qopt_offload *qopt)
1157{
1158	if (!priv->dma_cap.tbssel)
1159		return -EOPNOTSUPP;
1160	if (qopt->queue >= priv->plat->tx_queues_to_use)
1161		return -EINVAL;
1162	if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
1163		return -EINVAL;
1164
1165	if (qopt->enable)
1166		priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
1167	else
1168		priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
1169
1170	netdev_info(priv->dev, "%s ETF for Queue %d\n",
1171		    qopt->enable ? "enabled" : "disabled", qopt->queue);
1172	return 0;
1173}
1174
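/*
 * Example usage (illustrative, assuming eth0 and a taprio/mqprio root
 * with handle 100; the offload flag is what sets STMMAC_TBS_EN above):
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *           clockid CLOCK_TAI delta 300000 offload
 */
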
1175static int tc_query_caps(struct stmmac_priv *priv,
1176			 struct tc_query_caps_base *base)
1177{
1178	switch (base->type) {
1179	case TC_SETUP_QDISC_MQPRIO: {
1180		struct tc_mqprio_caps *caps = base->caps;
1181
1182		caps->validate_queue_counts = true;
1183
1184		return 0;
1185	}
1186	case TC_SETUP_QDISC_TAPRIO: {
1187		struct tc_taprio_caps *caps = base->caps;
1188
1189		if (!priv->dma_cap.estsel)
1190			return -EOPNOTSUPP;
1191
1192		caps->gate_mask_per_txq = true;
1193		caps->supports_queue_max_sdu = true;
1194
1195		return 0;
1196	}
1197	default:
1198		return -EOPNOTSUPP;
1199	}
1200}
1201
1202static void stmmac_reset_tc_mqprio(struct net_device *ndev,
1203				   struct netlink_ext_ack *extack)
1204{
1205	struct stmmac_priv *priv = netdev_priv(ndev);
1206
1207	netdev_reset_tc(ndev);
1208	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
1209	stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
1210}
1211
1212static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
1213				    struct tc_mqprio_qopt_offload *mqprio)
1214{
1215	struct netlink_ext_ack *extack = mqprio->extack;
1216	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
1217	u32 offset, count, num_stack_tx_queues = 0;
1218	struct net_device *ndev = priv->dev;
1219	u32 num_tc = qopt->num_tc;
1220	int err;
1221
1222	if (!num_tc) {
1223		stmmac_reset_tc_mqprio(ndev, extack);
1224		return 0;
1225	}
1226
1227	err = netdev_set_num_tc(ndev, num_tc);
1228	if (err)
1229		return err;
1230
1231	for (u32 tc = 0; tc < num_tc; tc++) {
1232		offset = qopt->offset[tc];
1233		count = qopt->count[tc];
1234		num_stack_tx_queues += count;
1235
1236		err = netdev_set_tc_queue(ndev, tc, count, offset);
1237		if (err)
1238			goto err_reset_tc;
1239	}
1240
1241	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
1242	if (err)
1243		goto err_reset_tc;
1244
1245	err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
1246					      mqprio->preemptible_tcs);
1247	if (err)
1248		goto err_reset_tc;
1249
1250	return 0;
1251
1252err_reset_tc:
1253	stmmac_reset_tc_mqprio(ndev, extack);
1254
1255	return err;
1256}
1257
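/*
 * Example usage (illustrative, assuming eth0 with four TX queues):
 *
 *   tc qdisc replace dev eth0 root handle 100: mqprio \
 *           num_tc 4 map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 \
 *           queues 1@0 1@1 1@2 1@3 hw 1
 *
 * With a recent enough iproute2, adding "fp P E E E" would mark TC0 as
 * preemptible, which is what arrives in mqprio->preemptible_tcs above
 * (hypothetical example; the exact fp syntax depends on the iproute2
 * version).
 */
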
1258static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
1259					 struct tc_mqprio_qopt_offload *mqprio)
1260{
1261	NL_SET_ERR_MSG_MOD(mqprio->extack,
1262			   "mqprio HW offload is not implemented for this MAC");
1263	return -EOPNOTSUPP;
1264}
1265
1266const struct stmmac_tc_ops dwmac4_tc_ops = {
1267	.init = tc_init,
1268	.setup_cls_u32 = tc_setup_cls_u32,
1269	.setup_cbs = tc_setup_cbs,
1270	.setup_cls = tc_setup_cls,
1271	.setup_taprio = tc_setup_taprio_without_fpe,
1272	.setup_etf = tc_setup_etf,
1273	.query_caps = tc_query_caps,
1274	.setup_mqprio = tc_setup_mqprio_unimplemented,
1275};
1276
1277const struct stmmac_tc_ops dwmac510_tc_ops = {
1278	.init = tc_init,
1279	.setup_cls_u32 = tc_setup_cls_u32,
1280	.setup_cbs = tc_setup_cbs,
1281	.setup_cls = tc_setup_cls,
1282	.setup_taprio = tc_setup_taprio,
1283	.setup_etf = tc_setup_etf,
1284	.query_caps = tc_query_caps,
1285	.setup_mqprio = tc_setup_dwmac510_mqprio,
1286};
1287
1288const struct stmmac_tc_ops dwxgmac_tc_ops = {
1289	.init = tc_init,
1290	.setup_cls_u32 = tc_setup_cls_u32,
1291	.setup_cbs = tc_setup_cbs,
1292	.setup_cls = tc_setup_cls,
1293	.setup_taprio = tc_setup_taprio,
1294	.setup_etf = tc_setup_etf,
1295	.query_caps = tc_query_caps,
1296	.setup_mqprio = tc_setup_dwmac510_mqprio,
1297};
v6.8
   1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
   2/*
   3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
   4 * stmmac TC Handling (HW only)
   5 */
   6
   7#include <net/pkt_cls.h>
   8#include <net/tc_act/tc_gact.h>
   9#include "common.h"
  10#include "dwmac4.h"
  11#include "dwmac5.h"
  12#include "stmmac.h"
  13
  14static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
  15{
  16	memset(entry, 0, sizeof(*entry));
  17	entry->in_use = true;
  18	entry->is_last = true;
  19	entry->is_frag = false;
  20	entry->prio = ~0x0;
  21	entry->handle = 0;
  22	entry->val.match_data = 0x0;
  23	entry->val.match_en = 0x0;
  24	entry->val.af = 1;
  25	entry->val.dma_ch_no = 0x0;
  26}
  27
  28static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
  29					     struct tc_cls_u32_offload *cls,
  30					     bool free)
  31{
  32	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
  33	u32 loc = cls->knode.handle;
  34	int i;
  35
  36	for (i = 0; i < priv->tc_entries_max; i++) {
  37		entry = &priv->tc_entries[i];
  38		if (!entry->in_use && !first && free)
  39			first = entry;
  40		if ((entry->handle == loc) && !free && !entry->is_frag)
  41			dup = entry;
  42	}
  43
  44	if (dup)
  45		return dup;
  46	if (first) {
  47		first->handle = loc;
  48		first->in_use = true;
  49
  50		/* Reset HW values */
  51		memset(&first->val, 0, sizeof(first->val));
  52	}
  53
  54	return first;
  55}
  56
  57static int tc_fill_actions(struct stmmac_tc_entry *entry,
  58			   struct stmmac_tc_entry *frag,
  59			   struct tc_cls_u32_offload *cls)
  60{
  61	struct stmmac_tc_entry *action_entry = entry;
  62	const struct tc_action *act;
  63	struct tcf_exts *exts;
  64	int i;
  65
  66	exts = cls->knode.exts;
  67	if (!tcf_exts_has_actions(exts))
  68		return -EINVAL;
  69	if (frag)
  70		action_entry = frag;
  71
  72	tcf_exts_for_each_action(i, act, exts) {
  73		/* Accept */
  74		if (is_tcf_gact_ok(act)) {
  75			action_entry->val.af = 1;
  76			break;
  77		}
  78		/* Drop */
  79		if (is_tcf_gact_shot(act)) {
  80			action_entry->val.rf = 1;
  81			break;
  82		}
  83
  84		/* Unsupported */
  85		return -EINVAL;
  86	}
  87
  88	return 0;
  89}
  90
  91static int tc_fill_entry(struct stmmac_priv *priv,
  92			 struct tc_cls_u32_offload *cls)
  93{
  94	struct stmmac_tc_entry *entry, *frag = NULL;
  95	struct tc_u32_sel *sel = cls->knode.sel;
  96	u32 off, data, mask, real_off, rem;
  97	u32 prio = cls->common.prio << 16;
  98	int ret;
  99
 100	/* Only 1 match per entry */
 101	if (sel->nkeys <= 0 || sel->nkeys > 1)
 102		return -EINVAL;
 103
 104	off = sel->keys[0].off << sel->offshift;
 105	data = sel->keys[0].val;
 106	mask = sel->keys[0].mask;
 107
 108	switch (ntohs(cls->common.protocol)) {
 109	case ETH_P_ALL:
 110		break;
 111	case ETH_P_IP:
 112		off += ETH_HLEN;
 113		break;
 114	default:
 115		return -EINVAL;
 116	}
 117
 118	if (off > priv->tc_off_max)
 119		return -EINVAL;
 120
 121	real_off = off / 4;
 122	rem = off % 4;
 123
 124	entry = tc_find_entry(priv, cls, true);
 125	if (!entry)
 126		return -EINVAL;
 127
 128	if (rem) {
 129		frag = tc_find_entry(priv, cls, true);
 130		if (!frag) {
 131			ret = -EINVAL;
 132			goto err_unuse;
 133		}
 134
 135		entry->frag_ptr = frag;
 136		entry->val.match_en = (mask << (rem * 8)) &
 137			GENMASK(31, rem * 8);
 138		entry->val.match_data = (data << (rem * 8)) &
 139			GENMASK(31, rem * 8);
 140		entry->val.frame_offset = real_off;
 141		entry->prio = prio;
 142
 143		frag->val.match_en = (mask >> (rem * 8)) &
 144			GENMASK(rem * 8 - 1, 0);
 145		frag->val.match_data = (data >> (rem * 8)) &
 146			GENMASK(rem * 8 - 1, 0);
 147		frag->val.frame_offset = real_off + 1;
 148		frag->prio = prio;
 149		frag->is_frag = true;
 150	} else {
 151		entry->frag_ptr = NULL;
 152		entry->val.match_en = mask;
 153		entry->val.match_data = data;
 154		entry->val.frame_offset = real_off;
 155		entry->prio = prio;
 156	}
 157
 158	ret = tc_fill_actions(entry, frag, cls);
 159	if (ret)
 160		goto err_unuse;
 161
 162	return 0;
 163
 164err_unuse:
 165	if (frag)
 166		frag->in_use = false;
 167	entry->in_use = false;
 168	return ret;
 169}
 170
 171static void tc_unfill_entry(struct stmmac_priv *priv,
 172			    struct tc_cls_u32_offload *cls)
 173{
 174	struct stmmac_tc_entry *entry;
 175
 176	entry = tc_find_entry(priv, cls, false);
 177	if (!entry)
 178		return;
 179
 180	entry->in_use = false;
 181	if (entry->frag_ptr) {
 182		entry = entry->frag_ptr;
 183		entry->is_frag = false;
 184		entry->in_use = false;
 185	}
 186}
 187
 188static int tc_config_knode(struct stmmac_priv *priv,
 189			   struct tc_cls_u32_offload *cls)
 190{
 191	int ret;
 192
 193	ret = tc_fill_entry(priv, cls);
 194	if (ret)
 195		return ret;
 196
 197	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
 198			priv->tc_entries_max);
 199	if (ret)
 200		goto err_unfill;
 201
 202	return 0;
 203
 204err_unfill:
 205	tc_unfill_entry(priv, cls);
 206	return ret;
 207}
 208
 209static int tc_delete_knode(struct stmmac_priv *priv,
 210			   struct tc_cls_u32_offload *cls)
 211{
 212	/* Set entry and fragments as not used */
 213	tc_unfill_entry(priv, cls);
 214
 215	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
 216				 priv->tc_entries_max);
 217}
 218
 219static int tc_setup_cls_u32(struct stmmac_priv *priv,
 220			    struct tc_cls_u32_offload *cls)
 221{
 222	switch (cls->command) {
 223	case TC_CLSU32_REPLACE_KNODE:
 224		tc_unfill_entry(priv, cls);
 225		fallthrough;
 226	case TC_CLSU32_NEW_KNODE:
 227		return tc_config_knode(priv, cls);
 228	case TC_CLSU32_DELETE_KNODE:
 229		return tc_delete_knode(priv, cls);
 230	default:
 231		return -EOPNOTSUPP;
 232	}
 233}
 234
 235static int tc_rfs_init(struct stmmac_priv *priv)
 236{
 237	int i;
 238
 239	priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
 240	priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
 241	priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
 242
 243	for (i = 0; i < STMMAC_RFS_T_MAX; i++)
 244		priv->rfs_entries_total += priv->rfs_entries_max[i];
 245
 246	priv->rfs_entries = devm_kcalloc(priv->device,
 247					 priv->rfs_entries_total,
 248					 sizeof(*priv->rfs_entries),
 249					 GFP_KERNEL);
 250	if (!priv->rfs_entries)
 251		return -ENOMEM;
 252
 253	dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
 254		 priv->rfs_entries_total);
 255
 256	return 0;
 257}
 258
 259static int tc_init(struct stmmac_priv *priv)
 260{
 261	struct dma_features *dma_cap = &priv->dma_cap;
 262	unsigned int count;
 263	int ret, i;
 264
 265	if (dma_cap->l3l4fnum) {
 266		priv->flow_entries_max = dma_cap->l3l4fnum;
 267		priv->flow_entries = devm_kcalloc(priv->device,
 268						  dma_cap->l3l4fnum,
 269						  sizeof(*priv->flow_entries),
 270						  GFP_KERNEL);
 271		if (!priv->flow_entries)
 272			return -ENOMEM;
 273
 274		for (i = 0; i < priv->flow_entries_max; i++)
 275			priv->flow_entries[i].idx = i;
 276
 277		dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
 278			 priv->flow_entries_max);
 279	}
 280
 281	ret = tc_rfs_init(priv);
 282	if (ret)
 283		return -ENOMEM;
 284
 285	if (!priv->plat->fpe_cfg) {
 286		priv->plat->fpe_cfg = devm_kzalloc(priv->device,
 287						   sizeof(*priv->plat->fpe_cfg),
 288						   GFP_KERNEL);
 289		if (!priv->plat->fpe_cfg)
 290			return -ENOMEM;
 291	} else {
 292		memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
 293	}
 294
 295	/* Fail silently as we can still use remaining features, e.g. CBS */
 296	if (!dma_cap->frpsel)
 297		return 0;
 298
 299	switch (dma_cap->frpbs) {
 300	case 0x0:
 301		priv->tc_off_max = 64;
 302		break;
 303	case 0x1:
 304		priv->tc_off_max = 128;
 305		break;
 306	case 0x2:
 307		priv->tc_off_max = 256;
 308		break;
 309	default:
 310		return -EINVAL;
 311	}
 312
 313	switch (dma_cap->frpes) {
 314	case 0x0:
 315		count = 64;
 316		break;
 317	case 0x1:
 318		count = 128;
 319		break;
 320	case 0x2:
 321		count = 256;
 322		break;
 323	default:
 324		return -EINVAL;
 325	}
 326
 327	/* Reserve one last filter which lets all pass */
 328	priv->tc_entries_max = count;
 329	priv->tc_entries = devm_kcalloc(priv->device,
 330			count, sizeof(*priv->tc_entries), GFP_KERNEL);
 331	if (!priv->tc_entries)
 332		return -ENOMEM;
 333
 334	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
 335
 336	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
 337			priv->tc_entries_max, priv->tc_off_max);
 338
 339	return 0;
 340}
 341
 342static int tc_setup_cbs(struct stmmac_priv *priv,
 343			struct tc_cbs_qopt_offload *qopt)
 344{
 345	u32 tx_queues_count = priv->plat->tx_queues_to_use;
 346	u32 queue = qopt->queue;
 347	u32 ptr, speed_div;
 348	u32 mode_to_use;
 349	u64 value;
 350	int ret;
 351
 352	/* Queue 0 is not AVB capable */
 353	if (queue <= 0 || queue >= tx_queues_count)
 354		return -EINVAL;
 355	if (!priv->dma_cap.av)
 356		return -EOPNOTSUPP;
 357
 358	/* Port Transmit Rate and Speed Divider */
 359	switch (priv->speed) {
 360	case SPEED_10000:
 361		ptr = 32;
 362		speed_div = 10000000;
 363		break;
 364	case SPEED_5000:
 365		ptr = 32;
 366		speed_div = 5000000;
 367		break;
 368	case SPEED_2500:
 369		ptr = 8;
 370		speed_div = 2500000;
 371		break;
 372	case SPEED_1000:
 373		ptr = 8;
 374		speed_div = 1000000;
 375		break;
 376	case SPEED_100:
 377		ptr = 4;
 378		speed_div = 100000;
 379		break;
 380	default:
 381		return -EOPNOTSUPP;
 382	}
 383
 384	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
 385	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
 386		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
 387		if (ret)
 388			return ret;
 389
 390		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
 391	} else if (!qopt->enable) {
 392		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
 393				       MTL_QUEUE_DCB);
 394		if (ret)
 395			return ret;
 396
 397		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
 398	}
 399
 400	/* Final adjustments for HW */
 401	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
 402	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
 403
 404	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
 405	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
 406
 407	value = qopt->hicredit * 1024ll * 8;
 408	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
 409
 410	value = qopt->locredit * 1024ll * 8;
 411	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
 412
 413	ret = stmmac_config_cbs(priv, priv->hw,
 414				priv->plat->tx_queues_cfg[queue].send_slope,
 415				priv->plat->tx_queues_cfg[queue].idle_slope,
 416				priv->plat->tx_queues_cfg[queue].high_credit,
 417				priv->plat->tx_queues_cfg[queue].low_credit,
 418				queue);
 419	if (ret)
 420		return ret;
 421
 422	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
 423			queue, qopt->sendslope, qopt->idleslope,
 424			qopt->hicredit, qopt->locredit);
 425	return 0;
 426}
 427
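/*
 * Illustrative note (not part of the original source): unlike the
 * v6.13.7 variant earlier in this document, this v6.8 version scales
 * the slopes by the negotiated link speed instead of by
 * idleslope - sendslope. At SPEED_1000, ptr = 8 and
 * speed_div = 1000000, so idleslope 20000 kbps is programmed as
 * 20000 * 1024 * 8 / 1000000 = 163.
 */
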
 428static int tc_parse_flow_actions(struct stmmac_priv *priv,
 429				 struct flow_action *action,
 430				 struct stmmac_flow_entry *entry,
 431				 struct netlink_ext_ack *extack)
 432{
 433	struct flow_action_entry *act;
 434	int i;
 435
 436	if (!flow_action_has_entries(action))
 437		return -EINVAL;
 438
 439	if (!flow_action_basic_hw_stats_check(action, extack))
 440		return -EOPNOTSUPP;
 441
 442	flow_action_for_each(i, act, action) {
 443		switch (act->id) {
 444		case FLOW_ACTION_DROP:
 445			entry->action |= STMMAC_FLOW_ACTION_DROP;
 446			return 0;
 447		default:
 448			break;
 449		}
 450	}
 451
 452	/* Nothing to do, maybe inverse filter ? */
 453	return 0;
 454}
 455
 456#define ETHER_TYPE_FULL_MASK	cpu_to_be16(~0)
 457
 458static int tc_add_basic_flow(struct stmmac_priv *priv,
 459			     struct flow_cls_offload *cls,
 460			     struct stmmac_flow_entry *entry)
 461{
 462	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 463	struct flow_dissector *dissector = rule->match.dissector;
 464	struct flow_match_basic match;
 465
 466	/* Nothing to do here */
 467	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
 468		return -EINVAL;
 469
 470	flow_rule_match_basic(rule, &match);
 471
 472	entry->ip_proto = match.key->ip_proto;
 473	return 0;
 474}
 475
 476static int tc_add_ip4_flow(struct stmmac_priv *priv,
 477			   struct flow_cls_offload *cls,
 478			   struct stmmac_flow_entry *entry)
 479{
 480	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 481	struct flow_dissector *dissector = rule->match.dissector;
 482	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
 483	struct flow_match_ipv4_addrs match;
 484	u32 hw_match;
 485	int ret;
 486
 487	/* Nothing to do here */
 488	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
 489		return -EINVAL;
 490
 491	flow_rule_match_ipv4_addrs(rule, &match);
 492	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
 493	if (hw_match) {
 494		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
 495					      false, true, inv, hw_match);
 496		if (ret)
 497			return ret;
 498	}
 499
 500	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
 501	if (hw_match) {
 502		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
 503					      false, false, inv, hw_match);
 504		if (ret)
 505			return ret;
 506	}
 507
 508	return 0;
 509}
 510
 511static int tc_add_ports_flow(struct stmmac_priv *priv,
 512			     struct flow_cls_offload *cls,
 513			     struct stmmac_flow_entry *entry)
 514{
 515	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 516	struct flow_dissector *dissector = rule->match.dissector;
 517	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
 518	struct flow_match_ports match;
 519	u32 hw_match;
 520	bool is_udp;
 521	int ret;
 522
 523	/* Nothing to do here */
 524	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
 525		return -EINVAL;
 526
 527	switch (entry->ip_proto) {
 528	case IPPROTO_TCP:
 529		is_udp = false;
 530		break;
 531	case IPPROTO_UDP:
 532		is_udp = true;
 533		break;
 534	default:
 535		return -EINVAL;
 536	}
 537
 538	flow_rule_match_ports(rule, &match);
 539
 540	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
 541	if (hw_match) {
 542		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
 543					      is_udp, true, inv, hw_match);
 544		if (ret)
 545			return ret;
 546	}
 547
 548	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
 549	if (hw_match) {
 550		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
 551					      is_udp, false, inv, hw_match);
 552		if (ret)
 553			return ret;
 554	}
 555
 556	entry->is_l4 = true;
 557	return 0;
 558}
 559
 560static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
 561					      struct flow_cls_offload *cls,
 562					      bool get_free)
 563{
 564	int i;
 565
 566	for (i = 0; i < priv->flow_entries_max; i++) {
 567		struct stmmac_flow_entry *entry = &priv->flow_entries[i];
 568
 569		if (entry->cookie == cls->cookie)
 570			return entry;
 571		if (get_free && (entry->in_use == false))
 572			return entry;
 573	}
 574
 575	return NULL;
 576}
 577
 578static struct {
 579	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
 580		  struct stmmac_flow_entry *entry);
 581} tc_flow_parsers[] = {
 582	{ .fn = tc_add_basic_flow },
 583	{ .fn = tc_add_ip4_flow },
 584	{ .fn = tc_add_ports_flow },
 585};
 586
 587static int tc_add_flow(struct stmmac_priv *priv,
 588		       struct flow_cls_offload *cls)
 589{
 590	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
 591	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 592	int i, ret;
 593
 594	if (!entry) {
 595		entry = tc_find_flow(priv, cls, true);
 596		if (!entry)
 597			return -ENOENT;
 598	}
 599
 600	ret = tc_parse_flow_actions(priv, &rule->action, entry,
 601				    cls->common.extack);
 602	if (ret)
 603		return ret;
 604
 605	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
 606		ret = tc_flow_parsers[i].fn(priv, cls, entry);
 607		if (!ret)
 608			entry->in_use = true;
 609	}
 610
 611	if (!entry->in_use)
 612		return -EINVAL;
 613
 614	entry->cookie = cls->cookie;
 615	return 0;
 616}
 617
 618static int tc_del_flow(struct stmmac_priv *priv,
 619		       struct flow_cls_offload *cls)
 620{
 621	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
 622	int ret;
 623
 624	if (!entry || !entry->in_use)
 625		return -ENOENT;
 626
 627	if (entry->is_l4) {
 628		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
 629					      false, false, false, 0);
 630	} else {
 631		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
 632					      false, false, false, 0);
 633	}
 634
 635	entry->in_use = false;
 636	entry->cookie = 0;
 637	entry->is_l4 = false;
 638	return ret;
 639}
 640
 641static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
 642					    struct flow_cls_offload *cls,
 643					    bool get_free)
 644{
 645	int i;
 646
 647	for (i = 0; i < priv->rfs_entries_total; i++) {
 648		struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
 649
 650		if (entry->cookie == cls->cookie)
 651			return entry;
 652		if (get_free && entry->in_use == false)
 653			return entry;
 654	}
 655
 656	return NULL;
 657}
 658
 659#define VLAN_PRIO_FULL_MASK (0x07)
 660
 661static int tc_add_vlan_flow(struct stmmac_priv *priv,
 662			    struct flow_cls_offload *cls)
 663{
 664	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 665	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 666	struct flow_dissector *dissector = rule->match.dissector;
 667	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
 668	struct flow_match_vlan match;
 669
 670	if (!entry) {
 671		entry = tc_find_rfs(priv, cls, true);
 672		if (!entry)
 673			return -ENOENT;
 674	}
 675
 676	if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
 677	    priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
 678		return -ENOENT;
 679
 680	/* Nothing to do here */
 681	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
 682		return -EINVAL;
 683
 684	if (tc < 0) {
 685		netdev_err(priv->dev, "Invalid traffic class\n");
 686		return -EINVAL;
 687	}
 688
 689	flow_rule_match_vlan(rule, &match);
 690
 691	if (match.mask->vlan_priority) {
 692		u32 prio;
 693
 694		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
 695			netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
 696			return -EINVAL;
 697		}
 698
 699		prio = BIT(match.key->vlan_priority);
 700		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
 701
 702		entry->in_use = true;
 703		entry->cookie = cls->cookie;
 704		entry->tc = tc;
 705		entry->type = STMMAC_RFS_T_VLAN;
 706		priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
 707	}
 708
 709	return 0;
 710}
 711
 712static int tc_del_vlan_flow(struct stmmac_priv *priv,
 713			    struct flow_cls_offload *cls)
 714{
 715	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 716
 717	if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
 718		return -ENOENT;
 719
 720	stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
 721
 722	entry->in_use = false;
 723	entry->cookie = 0;
 724	entry->tc = 0;
 725	entry->type = 0;
 726
 727	priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
 728
 729	return 0;
 730}
 731
 732static int tc_add_ethtype_flow(struct stmmac_priv *priv,
 733			       struct flow_cls_offload *cls)
 734{
 735	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 736	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 737	struct flow_dissector *dissector = rule->match.dissector;
 738	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
 739	struct flow_match_basic match;
 740
 741	if (!entry) {
 742		entry = tc_find_rfs(priv, cls, true);
 743		if (!entry)
 744			return -ENOENT;
 745	}
 746
 747	/* Nothing to do here */
 748	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
 749		return -EINVAL;
 750
 751	if (tc < 0) {
 752		netdev_err(priv->dev, "Invalid traffic class\n");
 753		return -EINVAL;
 754	}
 755
 756	flow_rule_match_basic(rule, &match);
 757
 758	if (match.mask->n_proto) {
 759		u16 etype = ntohs(match.key->n_proto);
 760
 761		if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
 762			netdev_err(priv->dev, "Only full mask is supported for EthType filter");
 763			return -EINVAL;
 764		}
 765		switch (etype) {
 766		case ETH_P_LLDP:
 767			if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
 768			    priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
 769				return -ENOENT;
 770
 771			entry->type = STMMAC_RFS_T_LLDP;
 772			priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
 773
 774			stmmac_rx_queue_routing(priv, priv->hw,
 775						PACKET_DCBCPQ, tc);
 776			break;
 777		case ETH_P_1588:
 778			if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
 779			    priv->rfs_entries_max[STMMAC_RFS_T_1588])
 780				return -ENOENT;
 781
 782			entry->type = STMMAC_RFS_T_1588;
 783			priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
 784
 785			stmmac_rx_queue_routing(priv, priv->hw,
 786						PACKET_PTPQ, tc);
 787			break;
 788		default:
 789			netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
 790			return -EINVAL;
 791		}
 792
 793		entry->in_use = true;
 794		entry->cookie = cls->cookie;
 795		entry->tc = tc;
 796		entry->etype = etype;
 797
 798		return 0;
 799	}
 800
 801	return -EINVAL;
 802}

static int tc_del_ethtype_flow(struct stmmac_priv *priv,
			       struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

	if (!entry || !entry->in_use ||
	    entry->type < STMMAC_RFS_T_LLDP ||
	    entry->type > STMMAC_RFS_T_1588)
		return -ENOENT;

	switch (entry->etype) {
	case ETH_P_LLDP:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_DCBCPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
		break;
	case ETH_P_1588:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_PTPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
		break;
	default:
		netdev_err(priv->dev, "EthType(0x%x) is not supported\n",
			   entry->etype);
		return -EINVAL;
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->tc = 0;
	entry->etype = 0;
	entry->type = 0;

	return 0;
}
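
/* flower rules are not backed by a single hardware table; try each
 * backend in turn (generic L3/L4 flow, then EthType routing, then VLAN
 * priority) and keep the first one that accepts the rule.
 */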
static int tc_add_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_add_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_add_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_del_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_del_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled the hardware bypasses these filters, so do
	 * not bother programming rules that would have no effect.
	 */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow_cls(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow_cls(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
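
/* If the requested base time is already in the past, push it forward by a
 * whole number of cycles so the schedule starts on the next cycle boundary:
 * base' = base + (n + 1) * cycle_time, with n = (now - base) / cycle_time.
 */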
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
					   ktime_t current_time,
					   u64 cycle_time)
{
	struct timespec64 time;

	if (ktime_after(old_base_time, current_time)) {
		time = ktime_to_timespec64(old_base_time);
	} else {
		s64 n;
		ktime_t base_time;

		n = div64_s64(ktime_sub_ns(current_time, old_base_time),
			      cycle_time);
		base_time = ktime_add_ns(old_base_time,
					 (n + 1) * cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	return time;
}

static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time, current_time, qopt_time;
	ktime_t current_time_ns;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (qopt->base_time < 0)
		return -ERANGE;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;
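
	/* Decode the snapshotted EST capabilities: estwid encodes the width
	 * in bits of each GCL time interval and estdep the maximum gate
	 * control list depth (encodings as we read them from the Synopsys
	 * EQoS databook).
	 */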
	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (qopt->cmd == TAPRIO_CMD_DESTROY)
		goto disable;
	else if (qopt->cmd != TAPRIO_CMD_REPLACE)
		return -EOPNOTSUPP;

	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->cycle_time)
		return -ERANGE;
	if (qopt->cycle_time_extension >= BIT(wid + 7))
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;

		mutex_init(&priv->plat->est->lock);
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	mutex_lock(&priv->plat->est->lock);
	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
	mutex_unlock(&priv->plat->est->lock);
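
	/* Each GCL word packs the interval into the low 'wid' bits and the
	 * per-queue gate mask into the remaining upper bits; bit 0 of the
	 * mask doubles as the hold/release flag once frame preemption is
	 * in play.
	 */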
	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}
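
	/* Program the launch parameters against the current PTP time: btr[]
	 * takes the (possibly advanced) base time split into ns/sec halves,
	 * and ctr[] the cycle time split the same way via do_div().
	 */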
	mutex_lock(&priv->plat->est->lock);
	/* Adjust for real system time */
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
					qopt->cycle_time);

	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	qopt_time = ktime_to_timespec64(qopt->base_time);
	priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
	priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	priv->plat->est->ter = qopt->cycle_time_extension;

	if (fpe && !priv->dma_cap.fpesel) {
		mutex_unlock(&priv->plat->est->lock);
		return -EOPNOTSUPP;
	}

	/* The actual FPE register configuration is done only after the FPE
	 * handshake succeeds.
	 */
	priv->plat->fpe_cfg->enable = fpe;

	ret = stmmac_est_configure(priv, priv, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	mutex_unlock(&priv->plat->est->lock);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");

	if (fpe) {
		stmmac_fpe_handshake(priv, true);
		netdev_info(priv->dev, "start FPE handshake\n");
	}

	return 0;
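
/* Shared teardown for TAPRIO_CMD_DESTROY and error unwinding: disable
 * EST in hardware first, then back out frame preemption and stop the
 * FPE handshake.
 */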
disable:
	if (priv->plat->est) {
		mutex_lock(&priv->plat->est->lock);
		priv->plat->est->enable = false;
		stmmac_est_configure(priv, priv, priv->plat->est,
				     priv->plat->clk_ptp_rate);
		mutex_unlock(&priv->plat->est->lock);
	}

	priv->plat->fpe_cfg->enable = false;
	stmmac_fpe_configure(priv, priv->ioaddr,
			     priv->plat->fpe_cfg,
			     priv->plat->tx_queues_to_use,
			     priv->plat->rx_queues_to_use,
			     false);
	netdev_info(priv->dev, "disabled FPE\n");

	stmmac_fpe_handshake(priv, false);
	netdev_info(priv->dev, "stop FPE handshake\n");

	return ret;
}
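
/* ETF offload maps onto the hardware's Time Based Scheduling (TBS): it
 * can only be toggled on queues whose DMA ring was provisioned with TBS
 * support (STMMAC_TBS_AVAIL) at setup time.
 */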
static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}
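
/* Report taprio capabilities: with EST the gate mask in each GCL entry
 * is interpreted per TX queue rather than per traffic class, hence
 * gate_mask_per_txq.
 */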
static int tc_query_caps(struct stmmac_priv *priv,
			 struct tc_query_caps_base *base)
{
	switch (base->type) {
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		if (!priv->dma_cap.estsel)
			return -EOPNOTSUPP;

		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
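
/* TC ops for dwmac 5.10+ style cores; presumably wired up through the
 * stmmac HW interface layer (hwif) like the other per-core ops tables.
 */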
const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
	.query_caps = tc_query_caps,
};