drivers/net/ethernet/mediatek/mtk_ppe.c (Linux v6.8)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
   3
   4#include <linux/kernel.h>
   5#include <linux/io.h>
   6#include <linux/iopoll.h>
   7#include <linux/etherdevice.h>
   8#include <linux/platform_device.h>
   9#include <linux/if_ether.h>
  10#include <linux/if_vlan.h>
  11#include <net/dst_metadata.h>
  12#include <net/dsa.h>
  13#include "mtk_eth_soc.h"
  14#include "mtk_ppe.h"
  15#include "mtk_ppe_regs.h"
  16
  17static DEFINE_SPINLOCK(ppe_lock);
  18
  19static const struct rhashtable_params mtk_flow_l2_ht_params = {
  20	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
  21	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
  22	.key_len = offsetof(struct mtk_foe_bridge, key_end),
  23	.automatic_shrinking = true,
  24};
  25
  26static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
  27{
  28	writel(val, ppe->base + reg);
  29}
  30
  31static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
  32{
  33	return readl(ppe->base + reg);
  34}
  35
  36static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
  37{
  38	u32 val;
  39
  40	val = ppe_r32(ppe, reg);
  41	val &= ~mask;
  42	val |= set;
  43	ppe_w32(ppe, reg, val);
  44
  45	return val;
  46}
  47
  48static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
  49{
  50	return ppe_m32(ppe, reg, 0, val);
  51}
  52
  53static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
  54{
  55	return ppe_m32(ppe, reg, val, 0);
  56}
  57
  58static u32 mtk_eth_timestamp(struct mtk_eth *eth)
  59{
  60	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
  61}
  62
  63static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
  64{
  65	int ret;
  66	u32 val;
  67
  68	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
  69				 !(val & MTK_PPE_GLO_CFG_BUSY),
  70				 20, MTK_PPE_WAIT_TIMEOUT_US);
  71
  72	if (ret)
  73		dev_err(ppe->dev, "PPE table busy");
  74
  75	return ret;
  76}
  77
  78static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
  79{
  80	int ret;
  81	u32 val;
  82
  83	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
  84				 !(val & MTK_PPE_MIB_SER_CR_ST),
  85				 20, MTK_PPE_WAIT_TIMEOUT_US);
  86
  87	if (ret)
  88		dev_err(ppe->dev, "MIB table busy");
  89
  90	return ret;
  91}
  92
  93static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
  94{
  95	u32 val, cnt_r0, cnt_r1, cnt_r2;
  96	int ret;
  97
  98	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
  99	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
 100
 101	ret = mtk_ppe_mib_wait_busy(ppe);
 102	if (ret)
 103		return ret;
 104
 105	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
 106	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
 107	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
 108
 109	if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
 110		/* 64 bit for each counter */
 111		u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
 112		*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
 113		*packets = ((u64)cnt_r3 << 32) | cnt_r2;
 114	} else {
 115		/* 48 bit byte counter, 40 bit packet counter */
 116		u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
 117		u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
 118		u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
 119		u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
 120		*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
 121		*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
 122	}
 123
 124	return 0;
 125}
 126
 127static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
 128{
 129	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
 130	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
 131}
 132
 133static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
 134{
 135	mtk_ppe_cache_clear(ppe);
 136
 137	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
 138		enable * MTK_PPE_CACHE_CTL_EN);
 139}
 140
 141static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
 142{
 143	u32 hv1, hv2, hv3;
 144	u32 hash;
 145
 146	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
 147		case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 148		case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 149			hv1 = e->ipv4.orig.ports;
 150			hv2 = e->ipv4.orig.dest_ip;
 151			hv3 = e->ipv4.orig.src_ip;
 152			break;
 153		case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
 154		case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
 155			hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
 156			hv1 ^= e->ipv6.ports;
 157
 158			hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
 159			hv2 ^= e->ipv6.dest_ip[0];
 160
 161			hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
 162			hv3 ^= e->ipv6.src_ip[0];
 163			break;
 164		case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
 165		case MTK_PPE_PKT_TYPE_IPV6_6RD:
 166		default:
 167			WARN_ON_ONCE(1);
 168			return MTK_PPE_HASH_MASK;
 169	}
 170
 171	hash = (hv1 & hv2) | ((~hv1) & hv3);
 172	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
 173	hash ^= hv1 ^ hv2 ^ hv3;
 174	hash ^= hash >> 16;
 175	hash <<= (ffs(eth->soc->hash_offset) - 1);
 176	hash &= MTK_PPE_ENTRIES - 1;
 177
 178	return hash;
 179}
 180
 181static inline struct mtk_foe_mac_info *
 182mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
 183{
 184	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 185
 186	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
 187		return &entry->bridge.l2;
 188
 189	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 190		return &entry->ipv6.l2;
 191
 192	return &entry->ipv4.l2;
 193}
 194
 195static inline u32 *
 196mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
 197{
 198	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 199
 200	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
 201		return &entry->bridge.ib2;
 202
 203	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 204		return &entry->ipv6.ib2;
 205
 206	return &entry->ipv4.ib2;
 207}
 208
 209int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 210			  int type, int l4proto, u8 pse_port, u8 *src_mac,
 211			  u8 *dest_mac)
 212{
 213	struct mtk_foe_mac_info *l2;
 214	u32 ports_pad, val;
 215
 216	memset(entry, 0, sizeof(*entry));
 217
 218	if (mtk_is_netsys_v2_or_greater(eth)) {
 219		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
 220		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
 221		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
 222		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
 223		entry->ib1 = val;
 224
 225		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
 226		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
 227	} else {
 228		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
 229
 230		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
 231		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
 232		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
 233		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
 234		entry->ib1 = val;
 235
 236		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
 237		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
 238		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
 239	}
 240
 241	if (is_multicast_ether_addr(dest_mac))
 242		val |= mtk_get_ib2_multicast_mask(eth);
 243
 244	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
 245	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
 246		entry->ipv4.orig.ports = ports_pad;
 247	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 248		entry->ipv6.ports = ports_pad;
 249
 250	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
 251		ether_addr_copy(entry->bridge.src_mac, src_mac);
 252		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
 253		entry->bridge.ib2 = val;
 254		l2 = &entry->bridge.l2;
 255	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 256		entry->ipv6.ib2 = val;
 257		l2 = &entry->ipv6.l2;
 258	} else {
 259		entry->ipv4.ib2 = val;
 260		l2 = &entry->ipv4.l2;
 261	}
 262
 263	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
 264	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
 265	l2->src_mac_hi = get_unaligned_be32(src_mac);
 266	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
 267
 268	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 269		l2->etype = ETH_P_IPV6;
 270	else
 271		l2->etype = ETH_P_IP;
 272
 273	return 0;
 274}
 275
 276int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
 277			       struct mtk_foe_entry *entry, u8 port)
 278{
 279	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 280	u32 val = *ib2;
 281
 282	if (mtk_is_netsys_v2_or_greater(eth)) {
 283		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
 284		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
 285	} else {
 286		val &= ~MTK_FOE_IB2_DEST_PORT;
 287		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
 288	}
 289	*ib2 = val;
 290
 291	return 0;
 292}
 293
 294int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
 295				 struct mtk_foe_entry *entry, bool egress,
 296				 __be32 src_addr, __be16 src_port,
 297				 __be32 dest_addr, __be16 dest_port)
 298{
 299	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 300	struct mtk_ipv4_tuple *t;
 301
 302	switch (type) {
 303	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 304		if (egress) {
 305			t = &entry->ipv4.new;
 306			break;
 307		}
 308		fallthrough;
 309	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
 310	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 311		t = &entry->ipv4.orig;
 312		break;
 313	case MTK_PPE_PKT_TYPE_IPV6_6RD:
 314		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
 315		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
 316		return 0;
 317	default:
 318		WARN_ON_ONCE(1);
 319		return -EINVAL;
 320	}
 321
 322	t->src_ip = be32_to_cpu(src_addr);
 323	t->dest_ip = be32_to_cpu(dest_addr);
 324
 325	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
 326		return 0;
 327
 328	t->src_port = be16_to_cpu(src_port);
 329	t->dest_port = be16_to_cpu(dest_port);
 330
 331	return 0;
 332}
 333
 334int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
 335				 struct mtk_foe_entry *entry,
 336				 __be32 *src_addr, __be16 src_port,
 337				 __be32 *dest_addr, __be16 dest_port)
 338{
 339	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 340	u32 *src, *dest;
 341	int i;
 342
 343	switch (type) {
 344	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
 345		src = entry->dslite.tunnel_src_ip;
 346		dest = entry->dslite.tunnel_dest_ip;
 347		break;
 348	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
 349	case MTK_PPE_PKT_TYPE_IPV6_6RD:
 350		entry->ipv6.src_port = be16_to_cpu(src_port);
 351		entry->ipv6.dest_port = be16_to_cpu(dest_port);
 352		fallthrough;
 353	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
 354		src = entry->ipv6.src_ip;
 355		dest = entry->ipv6.dest_ip;
 356		break;
 357	default:
 358		WARN_ON_ONCE(1);
 359		return -EINVAL;
 360	}
 361
 362	for (i = 0; i < 4; i++)
 363		src[i] = be32_to_cpu(src_addr[i]);
 364	for (i = 0; i < 4; i++)
 365		dest[i] = be32_to_cpu(dest_addr[i]);
 366
 367	return 0;
 368}
 369
 370int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 371			  int port)
 372{
 373	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 374
 375	l2->etype = BIT(port);
 376
 377	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
 378		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
 379	else
 380		l2->etype |= BIT(8);
 381
 382	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
 383
 384	return 0;
 385}
 386
 387int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 388			   int vid)
 389{
 390	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 391
 392	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
 393	case 0:
 394		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
 395			      mtk_prep_ib1_vlan_layer(eth, 1);
 396		l2->vlan1 = vid;
 397		return 0;
 398	case 1:
 399		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
 400			l2->vlan1 = vid;
 401			l2->etype |= BIT(8);
 402		} else {
 403			l2->vlan2 = vid;
 404			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
 405		}
 406		return 0;
 407	default:
 408		return -ENOSPC;
 409	}
 410}
 411
 412int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 413			    int sid)
 414{
 415	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 416
 417	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
 418	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
 419		l2->etype = ETH_P_PPP_SES;
 420
 421	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
 422	l2->pppoe_id = sid;
 423
 424	return 0;
 425}
 426
 427int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 428			   int wdma_idx, int txq, int bss, int wcid,
 429			   bool amsdu_en)
 430{
 431	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 432	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 433
 434	switch (eth->soc->version) {
 435	case 3:
 436		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
 437		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
 438			 MTK_FOE_IB2_WDMA_WINFO_V2;
 439		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
 440			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
 441		l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
 442		break;
 443	case 2:
 444		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
 445		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
 446			 MTK_FOE_IB2_WDMA_WINFO_V2;
 447		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
 448			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
 449		break;
 450	default:
 451		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
 452		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
 453		if (wdma_idx)
 454			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
 455		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
 456			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
 457			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
 458		break;
 459	}
 460
 461	return 0;
 462}
 463
 464int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 465			    unsigned int queue)
 466{
 467	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 468
 469	if (mtk_is_netsys_v2_or_greater(eth)) {
 470		*ib2 &= ~MTK_FOE_IB2_QID_V2;
 471		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
 472		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
 473	} else {
 474		*ib2 &= ~MTK_FOE_IB2_QID;
 475		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
 476		*ib2 |= MTK_FOE_IB2_PSE_QOS;
 477	}
 478
 479	return 0;
 480}
 481
 482static bool
 483mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
 484		     struct mtk_foe_entry *data)
 485{
 486	int type, len;
 487
 488	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
 489		return false;
 490
 491	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
 492	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 493		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
 494	else
 495		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
 496
 497	return !memcmp(&entry->data.data, &data->data, len - 4);
 498}
 499
 500static void
 501__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 502{
 503	struct hlist_head *head;
 504	struct hlist_node *tmp;
 505
 506	if (entry->type == MTK_FLOW_TYPE_L2) {
 507		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
 508				       mtk_flow_l2_ht_params);
 509
 510		head = &entry->l2_flows;
 511		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
 512			__mtk_foe_entry_clear(ppe, entry);
 513		return;
 514	}
 515
 516	hlist_del_init(&entry->list);
 517	if (entry->hash != 0xffff) {
 518		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
 519
 520		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
 521		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
 522		dma_wmb();
 523		mtk_ppe_cache_clear(ppe);
 524
 525		if (ppe->accounting) {
 526			struct mtk_foe_accounting *acct;
 527
 528			acct = ppe->acct_table + entry->hash * sizeof(*acct);
 529			acct->packets = 0;
 530			acct->bytes = 0;
 531		}
 532	}
 533	entry->hash = 0xffff;
 534
 535	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
 536		return;
 537
 538	hlist_del_init(&entry->l2_data.list);
 539	kfree(entry);
 540}
 541
 542static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
 543{
 544	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
 545	u16 now = mtk_eth_timestamp(ppe->eth);
 546	u16 timestamp = ib1 & ib1_ts_mask;
 547
 548	if (timestamp > now)
 549		return ib1_ts_mask + 1 - timestamp + now;
 550	else
 551		return now - timestamp;
 552}
 553
 554static void
 555mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 556{
 557	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
 558	struct mtk_flow_entry *cur;
 559	struct mtk_foe_entry *hwe;
 560	struct hlist_node *tmp;
 561	int idle;
 562
 563	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
 564	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
 565		int cur_idle;
 566		u32 ib1;
 567
 568		hwe = mtk_foe_get_entry(ppe, cur->hash);
 569		ib1 = READ_ONCE(hwe->ib1);
 570
 571		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
 572			cur->hash = 0xffff;
 573			__mtk_foe_entry_clear(ppe, cur);
 574			continue;
 575		}
 576
 577		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
 578		if (cur_idle >= idle)
 579			continue;
 580
 581		idle = cur_idle;
 582		entry->data.ib1 &= ~ib1_ts_mask;
 583		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
 584	}
 585}
 586
 587static void
 588mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 589{
 590	struct mtk_foe_entry foe = {};
 591	struct mtk_foe_entry *hwe;
 592
 593	spin_lock_bh(&ppe_lock);
 594
 595	if (entry->type == MTK_FLOW_TYPE_L2) {
 596		mtk_flow_entry_update_l2(ppe, entry);
 597		goto out;
 598	}
 599
 600	if (entry->hash == 0xffff)
 601		goto out;
 602
 603	hwe = mtk_foe_get_entry(ppe, entry->hash);
 604	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
 605	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
 606		entry->hash = 0xffff;
 607		goto out;
 608	}
 609
 610	entry->data.ib1 = foe.ib1;
 611
 612out:
 613	spin_unlock_bh(&ppe_lock);
 614}
 615
 616static void
 617__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 618		       u16 hash)
 619{
 620	struct mtk_eth *eth = ppe->eth;
 621	u16 timestamp = mtk_eth_timestamp(eth);
 622	struct mtk_foe_entry *hwe;
 623	u32 val;
 624
 625	if (mtk_is_netsys_v2_or_greater(eth)) {
 626		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
 627		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
 628					 timestamp);
 629	} else {
 630		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 631		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
 632					 timestamp);
 633	}
 634
 635	hwe = mtk_foe_get_entry(ppe, hash);
 636	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
 637	wmb();
 638	hwe->ib1 = entry->ib1;
 639
 640	if (ppe->accounting) {
 641		if (mtk_is_netsys_v2_or_greater(eth))
 642			val = MTK_FOE_IB2_MIB_CNT_V2;
 643		else
 644			val = MTK_FOE_IB2_MIB_CNT;
 645		*mtk_foe_entry_ib2(eth, hwe) |= val;
 646	}
 647
 648	dma_wmb();
 649
 650	mtk_ppe_cache_clear(ppe);
 651}
 652
 653void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 654{
 655	spin_lock_bh(&ppe_lock);
 656	__mtk_foe_entry_clear(ppe, entry);
 657	spin_unlock_bh(&ppe_lock);
 658}
 659
 660static int
 661mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 662{
 663	struct mtk_flow_entry *prev;
 664
 665	entry->type = MTK_FLOW_TYPE_L2;
 666
 667	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
 668						 mtk_flow_l2_ht_params);
 669	if (likely(!prev))
 670		return 0;
 671
 672	if (IS_ERR(prev))
 673		return PTR_ERR(prev);
 674
 675	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
 676				       &entry->l2_node, mtk_flow_l2_ht_params);
 677}
 678
 679int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 680{
 681	const struct mtk_soc_data *soc = ppe->eth->soc;
 682	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
 683	u32 hash;
 684
 685	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
 686		return mtk_foe_entry_commit_l2(ppe, entry);
 687
 688	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
 689	entry->hash = 0xffff;
 690	spin_lock_bh(&ppe_lock);
 691	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
 692	spin_unlock_bh(&ppe_lock);
 693
 694	return 0;
 695}
 696
 697static void
 698mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
 699			     u16 hash)
 700{
 701	const struct mtk_soc_data *soc = ppe->eth->soc;
 702	struct mtk_flow_entry *flow_info;
 703	struct mtk_foe_entry foe = {}, *hwe;
 704	struct mtk_foe_mac_info *l2;
 705	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
 706	int type;
 707
 708	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
 709	if (!flow_info)
 710		return;
 711
 712	flow_info->l2_data.base_flow = entry;
 713	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
 714	flow_info->hash = hash;
 715	hlist_add_head(&flow_info->list,
 716		       &ppe->foe_flow[hash / soc->hash_offset]);
 717	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
 718
 719	hwe = mtk_foe_get_entry(ppe, hash);
 720	memcpy(&foe, hwe, soc->foe_entry_size);
 721	foe.ib1 &= ib1_mask;
 722	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
 723
 724	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
 725	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
 726
 727	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
 728	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
 729		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
 730	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
 731		l2->etype = ETH_P_IPV6;
 732
 733	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
 734
 735	__mtk_foe_entry_commit(ppe, &foe, hash);
 736}
 737
 738void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 739{
 740	const struct mtk_soc_data *soc = ppe->eth->soc;
 741	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
 742	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
 743	struct mtk_flow_entry *entry;
 744	struct mtk_foe_bridge key = {};
 745	struct hlist_node *n;
 746	struct ethhdr *eh;
 747	bool found = false;
 748	u8 *tag;
 749
 750	spin_lock_bh(&ppe_lock);
 751
 752	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
 753		goto out;
 754
 755	hlist_for_each_entry_safe(entry, n, head, list) {
 756		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
 757			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
 758				     MTK_FOE_STATE_BIND))
 759				continue;
 760
 761			entry->hash = 0xffff;
 762			__mtk_foe_entry_clear(ppe, entry);
 763			continue;
 764		}
 765
 766		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
 767			if (entry->hash != 0xffff)
 768				entry->hash = 0xffff;
 769			continue;
 770		}
 771
 772		entry->hash = hash;
 773		__mtk_foe_entry_commit(ppe, &entry->data, hash);
 774		found = true;
 775	}
 776
 777	if (found)
 778		goto out;
 779
 780	eh = eth_hdr(skb);
 781	ether_addr_copy(key.dest_mac, eh->h_dest);
 782	ether_addr_copy(key.src_mac, eh->h_source);
 783	tag = skb->data - 2;
 784	key.vlan = 0;
 785	switch (skb->protocol) {
 786#if IS_ENABLED(CONFIG_NET_DSA)
 787	case htons(ETH_P_XDSA):
 788		if (!netdev_uses_dsa(skb->dev) ||
 789		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
 790			goto out;
 791
 792		if (!skb_metadata_dst(skb))
 793			tag += 4;
 794
 795		if (get_unaligned_be16(tag) != ETH_P_8021Q)
 796			break;
 797
 798		fallthrough;
 799#endif
 800	case htons(ETH_P_8021Q):
 801		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
 802		break;
 803	default:
 804		break;
 805	}
 806
 807	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
 808	if (!entry)
 809		goto out;
 810
 811	mtk_foe_entry_commit_subflow(ppe, entry, hash);
 812
 813out:
 814	spin_unlock_bh(&ppe_lock);
 815}
 816
 817int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 818{
 819	mtk_flow_entry_update(ppe, entry);
 820
 821	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
 822}
 823
 824int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
 825{
 826	if (!ppe)
 827		return -EINVAL;
 828
 829	/* disable KA */
 830	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
 831	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
 832	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
 833	usleep_range(10000, 11000);
 834
 835	/* set KA timer to maximum */
 836	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
 837	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
 838
 839	/* set KA tick select */
 840	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
 841	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
 842	usleep_range(10000, 11000);
 843
 844	/* disable scan mode */
 845	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
 846	usleep_range(10000, 11000);
 847
 848	return mtk_ppe_wait_busy(ppe);
 849}
 850
 851struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
 852						 struct mtk_foe_accounting *diff)
 853{
 854	struct mtk_foe_accounting *acct;
 855	int size = sizeof(struct mtk_foe_accounting);
 856	u64 bytes, packets;
 857
 858	if (!ppe->accounting)
 859		return NULL;
 860
 861	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
 862		return NULL;
 863
 864	acct = ppe->acct_table + index * size;
 865
 866	acct->bytes += bytes;
 867	acct->packets += packets;
 868
 869	if (diff) {
 870		diff->bytes = bytes;
 871		diff->packets = packets;
 872	}
 873
 874	return acct;
 875}
 876
 877struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
 878{
 879	bool accounting = eth->soc->has_accounting;
 880	const struct mtk_soc_data *soc = eth->soc;
 881	struct mtk_foe_accounting *acct;
 882	struct device *dev = eth->dev;
 883	struct mtk_mib_entry *mib;
 884	struct mtk_ppe *ppe;
 885	u32 foe_flow_size;
 886	void *foe;
 887
 888	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
 889	if (!ppe)
 890		return NULL;
 891
 892	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
 893
  894	/* need to allocate a separate device, since the PPE's DMA access is
 895	 * not coherent.
 896	 */
 897	ppe->base = base;
 898	ppe->eth = eth;
 899	ppe->dev = dev;
 900	ppe->version = eth->soc->offload_version;
 901	ppe->accounting = accounting;
 902
 903	foe = dmam_alloc_coherent(ppe->dev,
 904				  MTK_PPE_ENTRIES * soc->foe_entry_size,
 905				  &ppe->foe_phys, GFP_KERNEL);
 906	if (!foe)
 907		goto err_free_l2_flows;
 908
 909	ppe->foe_table = foe;
 910
 911	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
 912			sizeof(*ppe->foe_flow);
 913	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
 914	if (!ppe->foe_flow)
 915		goto err_free_l2_flows;
 916
 917	if (accounting) {
 918		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
 919					  &ppe->mib_phys, GFP_KERNEL);
 920		if (!mib)
 921			return NULL;
 922
 923		ppe->mib_table = mib;
 924
 925		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
 926				    GFP_KERNEL);
 927
 928		if (!acct)
 929			return NULL;
 930
 931		ppe->acct_table = acct;
 932	}
 933
 934	mtk_ppe_debugfs_init(ppe, index);
 935
 936	return ppe;
 937
 938err_free_l2_flows:
 939	rhashtable_destroy(&ppe->l2_flows);
 940	return NULL;
 941}
 942
 943void mtk_ppe_deinit(struct mtk_eth *eth)
 944{
 945	int i;
 946
 947	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
 948		if (!eth->ppe[i])
 949			return;
 950		rhashtable_destroy(&eth->ppe[i]->l2_flows);
 951	}
 952}
 953
 954static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
 955{
 956	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
 957	int i, k;
 958
 959	memset(ppe->foe_table, 0,
 960	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
 961
 962	if (!IS_ENABLED(CONFIG_SOC_MT7621))
 963		return;
 964
 965	/* skip all entries that cross the 1024 byte boundary */
 966	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
 967		for (k = 0; k < ARRAY_SIZE(skip); k++) {
 968			struct mtk_foe_entry *hwe;
 969
 970			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
 971			hwe->ib1 |= MTK_FOE_IB1_STATIC;
 972		}
 973	}
 974}
 975
 976void mtk_ppe_start(struct mtk_ppe *ppe)
 977{
 978	u32 val;
 979
 980	if (!ppe)
 981		return;
 982
 983	mtk_ppe_init_foe_table(ppe);
 984	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
 985
  986	      MTK_PPE_TB_CFG_AGE_NON_L4 |
  987	      MTK_PPE_TB_CFG_AGE_UNBIND |
 988	      MTK_PPE_TB_CFG_AGE_TCP |
 989	      MTK_PPE_TB_CFG_AGE_UDP |
 990	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
 991	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
 992			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
 993	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
 994			 MTK_PPE_KEEPALIVE_DISABLE) |
 995	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
 996	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
 997			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
 998	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
 999			 MTK_PPE_ENTRIES_SHIFT);
1000	if (mtk_is_netsys_v2_or_greater(ppe->eth))
1001		val |= MTK_PPE_TB_CFG_INFO_SEL;
1002	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
1003		val |= MTK_PPE_TB_CFG_ENTRY_80B;
1004	ppe_w32(ppe, MTK_PPE_TB_CFG, val);
1005
1006	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
1007		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
1008
1009	mtk_ppe_cache_enable(ppe, true);
1010
1011	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
1012	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
1013	      MTK_PPE_FLOW_CFG_IP6_6RD |
1014	      MTK_PPE_FLOW_CFG_IP4_NAT |
1015	      MTK_PPE_FLOW_CFG_IP4_NAPT |
1016	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
1017	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
1018	if (mtk_is_netsys_v2_or_greater(ppe->eth))
1019		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
1020		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
1021		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
1022		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
1023	else
1024		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
1025		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
1026	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
1027
1028	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
1029	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
1030	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
1031
1032	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
1033	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
1034	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
1035
1036	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
1037	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
1038	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
1039
1040	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
1041	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
1042
1043	val = MTK_PPE_BIND_LIMIT1_FULL |
1044	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
1045	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
1046
1047	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
1048	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
1049	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
1050
1051	/* enable PPE */
1052	val = MTK_PPE_GLO_CFG_EN |
1053	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
1054	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
1055	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
1056	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
1057
1058	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
1059
1060	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
1061		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
1062		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
1063	}
1064
1065	if (ppe->accounting && ppe->mib_phys) {
1066		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
1067		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
1068			MTK_PPE_MIB_CFG_EN);
1069		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
1070			MTK_PPE_MIB_CFG_RD_CLR);
1071		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
1072			MTK_PPE_MIB_CFG_RD_CLR);
1073	}
1074}
1075
1076int mtk_ppe_stop(struct mtk_ppe *ppe)
1077{
1078	u32 val;
1079	int i;
1080
1081	if (!ppe)
1082		return 0;
1083
1084	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
1085		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
1086
1087		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
1088				      MTK_FOE_STATE_INVALID);
1089	}
1090
1091	mtk_ppe_cache_enable(ppe, false);
1092
1093	/* disable offload engine */
1094	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
1095	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
1096
1097	/* disable aging */
1098	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
1099	      MTK_PPE_TB_CFG_AGE_UNBIND |
1100	      MTK_PPE_TB_CFG_AGE_TCP |
1101	      MTK_PPE_TB_CFG_AGE_UDP |
1102	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
1103	ppe_clear(ppe, MTK_PPE_TB_CFG, val);
1104
1105	return mtk_ppe_wait_busy(ppe);
1106}
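
For orientation, the sketch below shows roughly how the flow-offload layer of this driver (mtk_ppe_offload.c) is expected to drive the helpers above when it programs a single IPv4 NAPT flow: build the software copy of the FOE entry, fill in the original and translated tuples, then commit it so that __mtk_ppe_check_skb() can later bind it to a hardware hash slot. This is an illustrative sketch, not driver code: the function name, the PSE port number and the addresses/ports are placeholders, error handling is abbreviated, and the flow_rule parsing that normally supplies these values is omitted.

#include <linux/in.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"

/* Hypothetical example; only meaningful when built inside this driver. */
static int example_offload_ipv4_flow(struct mtk_eth *eth, struct mtk_ppe *ppe,
				     struct mtk_flow_entry *entry,
				     u8 *src_mac, u8 *dest_mac)
{
	int err;

	/* Build the software copy of the FOE entry in entry->data. */
	err = mtk_foe_entry_prepare(eth, &entry->data,
				    MTK_PPE_PKT_TYPE_IPV4_HNAPT, IPPROTO_TCP,
				    1 /* placeholder PSE port */,
				    src_mac, dest_mac);
	if (err)
		return err;

	/* Original (pre-NAT) tuple as seen on ingress ... */
	err = mtk_foe_entry_set_ipv4_tuple(eth, &entry->data, false,
					   cpu_to_be32(0xc0a80164),
					   cpu_to_be16(40000),
					   cpu_to_be32(0x01010101),
					   cpu_to_be16(443));
	if (err)
		return err;

	/* ... and the translated tuple used on egress. */
	err = mtk_foe_entry_set_ipv4_tuple(eth, &entry->data, true,
					   cpu_to_be32(0x0a000001),
					   cpu_to_be16(40000),
					   cpu_to_be32(0x01010101),
					   cpu_to_be16(443));
	if (err)
		return err;

	/* Queue the entry on ppe->foe_flow; the hardware slot is filled in
	 * later by __mtk_ppe_check_skb() when a packet of this flow misses
	 * in the PPE and is handed to the CPU.
	 */
	return mtk_foe_entry_commit(ppe, entry);
}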