   1// SPDX-License-Identifier: ISC
   2/* Copyright (C) 2020 MediaTek Inc. */
   3
   4#include <linux/etherdevice.h>
   5#include <linux/timekeeping.h>
   6#include "coredump.h"
   7#include "mt7915.h"
   8#include "../dma.h"
   9#include "mac.h"
  10#include "mcu.h"
  11
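/* Convert a packed RCPI field to dBm: RSSI = RCPI / 2 - 110,
 * kept in integer form as (RCPI - 220) / 2.
 */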
  12#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
  13
  14static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
  15	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
  16	.radar_pattern = {
  17		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
  18		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
  19		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
  20		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
  21		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
  22		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
  23		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
  24		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
  25	},
  26};
  27
  28static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
  29	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
  30	.radar_pattern = {
  31		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
  32		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
  33		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
  34		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
  35		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
  36	},
  37};
  38
  39static const struct mt7915_dfs_radar_spec jp_radar_specs = {
  40	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
  41	.radar_pattern = {
  42		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
  43		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
  44		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
  45		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
  46		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
  47		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
  48		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
  49		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
  50	},
  51};
  52
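/* Map a hardware WLAN index to its wcid. Unicast frames use the
 * indexed entry directly; group-addressed frames are accounted
 * against the owning vif's own sta wcid instead.
 */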
  53static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
  54					    u16 idx, bool unicast)
  55{
  56	struct mt7915_sta *sta;
  57	struct mt76_wcid *wcid;
  58
  59	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
  60		return NULL;
  61
  62	wcid = rcu_dereference(dev->mt76.wcid[idx]);
  63	if (unicast || !wcid)
  64		return wcid;
  65
  66	if (!wcid->sta)
  67		return NULL;
  68
  69	sta = container_of(wcid, struct mt7915_sta, wcid);
  70	if (!sta->vif)
  71		return NULL;
  72
  73	return &sta->vif->sta.wcid;
  74}
  75
  76bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
  77{
  78	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
  79		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
  80
  81	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
  82			 0, 5000);
  83}
  84
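/* WTBL entries are grouped 128 per WDUCR group (wcid >> 7): select
 * the group, then return the MMIO offset of dword 'dw' within the
 * entry.
 */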
  85u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
  86{
  87	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
  88		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
  89
  90	return MT_WTBL_LMAC_OFFS(wcid, dw);
  91}
  92
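/* Drain the station poll list: read per-AC TX/RX airtime deltas from
 * WTBL dword 20 onwards and report them to mac80211, refresh the GI
 * bits of the cached rate from dword 7, and sample the RSSI of
 * response frames (CTS/BA/ACK) from dword 30.
 */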
  93static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
  94{
  95	static const u8 ac_to_tid[] = {
  96		[IEEE80211_AC_BE] = 0,
  97		[IEEE80211_AC_BK] = 1,
  98		[IEEE80211_AC_VI] = 4,
  99		[IEEE80211_AC_VO] = 6
 100	};
 101	struct ieee80211_sta *sta;
 102	struct mt7915_sta *msta;
 103	struct rate_info *rate;
 104	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
 105	LIST_HEAD(sta_poll_list);
 106	int i;
 107
 108	spin_lock_bh(&dev->mt76.sta_poll_lock);
 109	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
 110	spin_unlock_bh(&dev->mt76.sta_poll_lock);
 111
 112	rcu_read_lock();
 113
 114	while (true) {
 115		bool clear = false;
 116		u32 addr, val;
 117		u16 idx;
 118		s8 rssi[4];
 119		u8 bw;
 120
 121		spin_lock_bh(&dev->mt76.sta_poll_lock);
 122		if (list_empty(&sta_poll_list)) {
 123			spin_unlock_bh(&dev->mt76.sta_poll_lock);
 124			break;
 125		}
 126		msta = list_first_entry(&sta_poll_list,
 127					struct mt7915_sta, wcid.poll_list);
 128		list_del_init(&msta->wcid.poll_list);
 129		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 130
 131		idx = msta->wcid.idx;
 132
 133		/* refresh peer's airtime reporting */
 134		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
 135
 136		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 137			u32 tx_last = msta->airtime_ac[i];
 138			u32 rx_last = msta->airtime_ac[i + 4];
 139
 140			msta->airtime_ac[i] = mt76_rr(dev, addr);
 141			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
 142
 143			tx_time[i] = msta->airtime_ac[i] - tx_last;
 144			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
 145
 146			if ((tx_last | rx_last) & BIT(30))
 147				clear = true;
 148
 149			addr += 8;
 150		}
 151
 152		if (clear) {
 153			mt7915_mac_wtbl_update(dev, idx,
 154					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 155			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
 156		}
 157
 158		if (!msta->wcid.sta)
 159			continue;
 160
 161		sta = container_of((void *)msta, struct ieee80211_sta,
 162				   drv_priv);
 163		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 164			u8 queue = mt76_connac_lmac_mapping(i);
 165			u32 tx_cur = tx_time[queue];
 166			u32 rx_cur = rx_time[queue];
 167			u8 tid = ac_to_tid[i];
 168
 169			if (!tx_cur && !rx_cur)
 170				continue;
 171
 172			ieee80211_sta_register_airtime(sta, tid, tx_cur,
 173						       rx_cur);
 174		}
 175
 176		/*
 177		 * We don't support reading GI info from txs packets.
 178		 * For accurate tx status reporting and AQL improvement,
  179		 * we need to make sure the flags match, so poll GI
  180		 * from the per-sta counters directly.
 181		 */
 182		rate = &msta->wcid.rate;
 183		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
 184		val = mt76_rr(dev, addr);
 185
 186		switch (rate->bw) {
 187		case RATE_INFO_BW_160:
 188			bw = IEEE80211_STA_RX_BW_160;
 189			break;
 190		case RATE_INFO_BW_80:
 191			bw = IEEE80211_STA_RX_BW_80;
 192			break;
 193		case RATE_INFO_BW_40:
 194			bw = IEEE80211_STA_RX_BW_40;
 195			break;
 196		default:
 197			bw = IEEE80211_STA_RX_BW_20;
 198			break;
 199		}
 200
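		/* HE GI lives in a 2-bit field per bandwidth starting at
		 * bit 24; HT/VHT short GI is a single bit at 12 + bw.
		 */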
 201		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
 202			u8 offs = 24 + 2 * bw;
 203
 204			rate->he_gi = (val & (0x3 << offs)) >> offs;
 205		} else if (rate->flags &
 206			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
 207			if (val & BIT(12 + bw))
 208				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
 209			else
 210				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
 211		}
 212
 213		/* get signal strength of resp frames (CTS/BA/ACK) */
 214		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
 215		val = mt76_rr(dev, addr);
 216
 217		rssi[0] = to_rssi(GENMASK(7, 0), val);
 218		rssi[1] = to_rssi(GENMASK(15, 8), val);
 219		rssi[2] = to_rssi(GENMASK(23, 16), val);
  220		rssi[3] = to_rssi(GENMASK(31, 24), val);
 221
 222		msta->ack_signal =
 223			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
 224
 225		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
 226	}
 227
 228	rcu_read_unlock();
 229}
 230
 231void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
 232			      struct ieee80211_vif *vif, bool enable)
 233{
 234	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 235	u32 addr;
 236
 237	addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
 238	if (enable)
 239		mt76_set(dev, addr, BIT(5));
 240	else
 241		mt76_clear(dev, addr, BIT(5));
 242}
 243
 244static void
 245mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
 246		     struct mt7915_sta *msta, struct sk_buff *skb,
 247		     u32 info)
 248{
 249	struct ieee80211_vif *vif;
 250	struct wireless_dev *wdev;
 251
 252	if (!msta || !msta->vif)
 253		return;
 254
 255	if (!mt76_queue_is_wed_rx(q))
 256		return;
 257
 258	if (!(info & MT_DMA_INFO_PPE_VLD))
 259		return;
 260
 261	vif = container_of((void *)msta->vif, struct ieee80211_vif,
 262			   drv_priv);
 263	wdev = ieee80211_vif_to_wdev(vif);
 264	skb->dev = wdev->netdev;
 265
 266	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
 267				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
 268				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
 269}
 270
 271static int
 272mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
 273		   enum mt76_rxq_id q, u32 *info)
 274{
 275	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 276	struct mt76_phy *mphy = &dev->mt76.phy;
 277	struct mt7915_phy *phy = &dev->phy;
 278	struct ieee80211_supported_band *sband;
 279	__le32 *rxd = (__le32 *)skb->data;
 280	__le32 *rxv = NULL;
 281	u32 rxd0 = le32_to_cpu(rxd[0]);
 282	u32 rxd1 = le32_to_cpu(rxd[1]);
 283	u32 rxd2 = le32_to_cpu(rxd[2]);
 284	u32 rxd3 = le32_to_cpu(rxd[3]);
 285	u32 rxd4 = le32_to_cpu(rxd[4]);
 286	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
 287	bool unicast, insert_ccmp_hdr = false;
 288	u8 remove_pad, amsdu_info;
 289	u8 mode = 0, qos_ctl = 0;
 290	struct mt7915_sta *msta = NULL;
 291	u32 csum_status = *(u32 *)skb->cb;
 292	bool hdr_trans;
 293	u16 hdr_gap;
 294	u16 seq_ctrl = 0;
 295	__le16 fc = 0;
 296	int idx;
 297
 298	memset(status, 0, sizeof(*status));
 299
 300	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
 301		mphy = dev->mt76.phys[MT_BAND1];
 302		if (!mphy)
 303			return -EINVAL;
 304
 305		phy = mphy->priv;
 306		status->phy_idx = 1;
 307	}
 308
 309	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
 310		return -EINVAL;
 311
 312	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
 313		return -EINVAL;
 314
 315	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
 316	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
 317		return -EINVAL;
 318
 319	/* ICV error or CCMP/BIP/WPI MIC error */
 320	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
 321		status->flag |= RX_FLAG_ONLY_MONITOR;
 322
 323	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
 324	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
 325	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
 326
 327	if (status->wcid) {
 328		msta = container_of(status->wcid, struct mt7915_sta, wcid);
 329		spin_lock_bh(&dev->mt76.sta_poll_lock);
 330		if (list_empty(&msta->wcid.poll_list))
 331			list_add_tail(&msta->wcid.poll_list,
 332				      &dev->mt76.sta_poll_list);
 333		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 334	}
 335
 336	status->freq = mphy->chandef.chan->center_freq;
 337	status->band = mphy->chandef.chan->band;
 338	if (status->band == NL80211_BAND_5GHZ)
 339		sband = &mphy->sband_5g.sband;
 340	else if (status->band == NL80211_BAND_6GHZ)
 341		sband = &mphy->sband_6g.sband;
 342	else
 343		sband = &mphy->sband_2g.sband;
 344
 345	if (!sband->channels)
 346		return -EINVAL;
 347
 348	if ((rxd0 & csum_mask) == csum_mask &&
 349	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
 350		skb->ip_summed = CHECKSUM_UNNECESSARY;
 351
 352	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
 353		status->flag |= RX_FLAG_FAILED_FCS_CRC;
 354
 355	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
 356		status->flag |= RX_FLAG_MMIC_ERROR;
 357
 358	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
 359	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
 360		status->flag |= RX_FLAG_DECRYPTED;
 361		status->flag |= RX_FLAG_IV_STRIPPED;
 362		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
 363	}
 364
 365	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
 366
 367	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
 368		return -EINVAL;
 369
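	/* The fixed 6-dword RXD is followed by optional groups whose
	 * presence is flagged in rxd1: group 4 (802.11 header fields),
	 * group 1 (security IV), group 2 (timestamp), group 3 (P-RXV)
	 * and group 5 (C-RXV). Bounds-check after each group.
	 */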
 370	rxd += 6;
 371	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
 372		u32 v0 = le32_to_cpu(rxd[0]);
 373		u32 v2 = le32_to_cpu(rxd[2]);
 374
 375		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
 376		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
 377		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
 378
 379		rxd += 4;
 380		if ((u8 *)rxd - skb->data >= skb->len)
 381			return -EINVAL;
 382	}
 383
 384	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
 385		u8 *data = (u8 *)rxd;
 386
 387		if (status->flag & RX_FLAG_DECRYPTED) {
 388			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
 389			case MT_CIPHER_AES_CCMP:
 390			case MT_CIPHER_CCMP_CCX:
 391			case MT_CIPHER_CCMP_256:
 392				insert_ccmp_hdr =
 393					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
 394				fallthrough;
 395			case MT_CIPHER_TKIP:
 396			case MT_CIPHER_TKIP_NO_MIC:
 397			case MT_CIPHER_GCMP:
 398			case MT_CIPHER_GCMP_256:
 399				status->iv[0] = data[5];
 400				status->iv[1] = data[4];
 401				status->iv[2] = data[3];
 402				status->iv[3] = data[2];
 403				status->iv[4] = data[1];
 404				status->iv[5] = data[0];
 405				break;
 406			default:
 407				break;
 408			}
 409		}
 410		rxd += 4;
 411		if ((u8 *)rxd - skb->data >= skb->len)
 412			return -EINVAL;
 413	}
 414
 415	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
 416		status->timestamp = le32_to_cpu(rxd[0]);
 417		status->flag |= RX_FLAG_MACTIME_START;
 418
 419		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
 420			status->flag |= RX_FLAG_AMPDU_DETAILS;
 421
 422			/* all subframes of an A-MPDU have the same timestamp */
 423			if (phy->rx_ampdu_ts != status->timestamp) {
 424				if (!++phy->ampdu_ref)
 425					phy->ampdu_ref++;
 426			}
 427			phy->rx_ampdu_ts = status->timestamp;
 428
 429			status->ampdu_ref = phy->ampdu_ref;
 430		}
 431
 432		rxd += 2;
 433		if ((u8 *)rxd - skb->data >= skb->len)
 434			return -EINVAL;
 435	}
 436
 437	/* RXD Group 3 - P-RXV */
 438	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
 439		u32 v0, v1;
 440		int ret;
 441
 442		rxv = rxd;
 443		rxd += 2;
 444		if ((u8 *)rxd - skb->data >= skb->len)
 445			return -EINVAL;
 446
 447		v0 = le32_to_cpu(rxv[0]);
 448		v1 = le32_to_cpu(rxv[1]);
 449
 450		if (v0 & MT_PRXV_HT_AD_CODE)
 451			status->enc_flags |= RX_ENC_FLAG_LDPC;
 452
 453		status->chains = mphy->antenna_mask;
 454		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
 455		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
 456		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
 457		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
 458
 459		/* RXD Group 5 - C-RXV */
 460		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
 461			rxd += 18;
 462			if ((u8 *)rxd - skb->data >= skb->len)
 463				return -EINVAL;
 464		}
 465
 466		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
 467			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
 468							    sband, rxv, &mode);
 469			if (ret < 0)
 470				return ret;
 471		}
 472	}
 473
 474	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
 475	status->amsdu = !!amsdu_info;
 476	if (status->amsdu) {
 477		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
 478		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
 479	}
 480
 481	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
 482	if (hdr_trans && ieee80211_has_morefrags(fc)) {
 483		struct ieee80211_vif *vif;
 484		int err;
 485
 486		if (!msta || !msta->vif)
 487			return -EINVAL;
 488
 489		vif = container_of((void *)msta->vif, struct ieee80211_vif,
 490				   drv_priv);
 491		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
 492		if (err)
 493			return err;
 494
 495		hdr_trans = false;
 496	} else {
 497		int pad_start = 0;
 498
 499		skb_pull(skb, hdr_gap);
 500		if (!hdr_trans && status->amsdu) {
 501			pad_start = ieee80211_get_hdrlen_from_skb(skb);
 502		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
 503			/*
 504			 * When header translation failure is indicated,
 505			 * the hardware will insert an extra 2-byte field
 506			 * containing the data length after the protocol
 507			 * type field. This happens either when the LLC-SNAP
 508			 * pattern did not match, or if a VLAN header was
 509			 * detected.
 510			 */
 511			pad_start = 12;
 512			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
 513				pad_start += 4;
 514			else
 515				pad_start = 0;
 516		}
 517
 518		if (pad_start) {
 519			memmove(skb->data + 2, skb->data, pad_start);
 520			skb_pull(skb, 2);
 521		}
 522	}
 523
 524	if (!hdr_trans) {
 525		struct ieee80211_hdr *hdr;
 526
 527		if (insert_ccmp_hdr) {
 528			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
 529
 530			mt76_insert_ccmp_hdr(skb, key_id);
 531		}
 532
 533		hdr = mt76_skb_get_hdr(skb);
 534		fc = hdr->frame_control;
 535		if (ieee80211_is_data_qos(fc)) {
 536			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
 537			qos_ctl = *ieee80211_get_qos_ctl(hdr);
 538		}
 539	} else {
 540		status->flag |= RX_FLAG_8023;
 541		mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
 542				     *info);
 543	}
 544
 545	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
 546		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
 547
 548	if (!status->wcid || !ieee80211_is_data_qos(fc))
 549		return 0;
 550
 551	status->aggr = unicast &&
 552		       !ieee80211_is_qos_nullfunc(fc);
 553	status->qos_ctl = qos_ctl;
 554	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
 555
 556	return 0;
 557}
 558
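/* Testmode-only RXV parsing: record per-chain RCPI, IB/WB RSSI,
 * frequency offset and SNR of the last received vector.
 */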
 559static void
 560mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 561{
 562#ifdef CONFIG_NL80211_TESTMODE
 563	struct mt7915_phy *phy = &dev->phy;
 564	__le32 *rxd = (__le32 *)skb->data;
 565	__le32 *rxv_hdr = rxd + 2;
 566	__le32 *rxv = rxd + 4;
 567	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
 568	u8 band_idx;
 569	s32 foe;
 570	u8 snr;
 571	int i;
 572
 573	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
 574	if (band_idx && !phy->mt76->band_idx) {
 575		phy = mt7915_ext_phy(dev);
 576		if (!phy)
 577			goto out;
 578	}
 579
 580	rcpi = le32_to_cpu(rxv[6]);
 581	ib_rssi = le32_to_cpu(rxv[7]);
 582	wb_rssi = le32_to_cpu(rxv[8]) >> 5;
 583
 584	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
 585		if (i == 3)
 586			wb_rssi = le32_to_cpu(rxv[9]);
 587
 588		phy->test.last_rcpi[i] = rcpi & 0xff;
 589		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
 590		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
 591	}
 592
 593	v20 = le32_to_cpu(rxv[20]);
 594	v21 = le32_to_cpu(rxv[21]);
 595
 596	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
 597	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);
 598
 599	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
 600
 601	phy->test.last_freq_offset = foe;
 602	phy->test.last_snr = snr;
 603out:
 604#endif
 605	dev_kfree_skb(skb);
 606}
 607
 608static void
 609mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
 610			 struct sk_buff *skb)
 611{
 612#ifdef CONFIG_NL80211_TESTMODE
 613	struct mt76_testmode_data *td = &phy->mt76->test;
 614	const struct ieee80211_rate *r;
 615	u8 bw, mode, nss = td->tx_rate_nss;
 616	u8 rate_idx = td->tx_rate_idx;
 617	u16 rateval = 0;
 618	u32 val;
 619	bool cck = false;
 620	int band;
 621
 622	if (skb != phy->mt76->test.tx_skb)
 623		return;
 624
 625	switch (td->tx_rate_mode) {
 626	case MT76_TM_TX_MODE_HT:
 627		nss = 1 + (rate_idx >> 3);
 628		mode = MT_PHY_TYPE_HT;
 629		break;
 630	case MT76_TM_TX_MODE_VHT:
 631		mode = MT_PHY_TYPE_VHT;
 632		break;
 633	case MT76_TM_TX_MODE_HE_SU:
 634		mode = MT_PHY_TYPE_HE_SU;
 635		break;
 636	case MT76_TM_TX_MODE_HE_EXT_SU:
 637		mode = MT_PHY_TYPE_HE_EXT_SU;
 638		break;
 639	case MT76_TM_TX_MODE_HE_TB:
 640		mode = MT_PHY_TYPE_HE_TB;
 641		break;
 642	case MT76_TM_TX_MODE_HE_MU:
 643		mode = MT_PHY_TYPE_HE_MU;
 644		break;
 645	case MT76_TM_TX_MODE_CCK:
 646		cck = true;
 647		fallthrough;
 648	case MT76_TM_TX_MODE_OFDM:
 649		band = phy->mt76->chandef.chan->band;
 650		if (band == NL80211_BAND_2GHZ && !cck)
 651			rate_idx += 4;
 652
 653		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
 654		val = cck ? r->hw_value_short : r->hw_value;
 655
 656		mode = val >> 8;
 657		rate_idx = val & 0xff;
 658		break;
 659	default:
 660		mode = MT_PHY_TYPE_OFDM;
 661		break;
 662	}
 663
 664	switch (phy->mt76->chandef.width) {
 665	case NL80211_CHAN_WIDTH_40:
 666		bw = 1;
 667		break;
 668	case NL80211_CHAN_WIDTH_80:
 669		bw = 2;
 670		break;
 671	case NL80211_CHAN_WIDTH_80P80:
 672	case NL80211_CHAN_WIDTH_160:
 673		bw = 3;
 674		break;
 675	default:
 676		bw = 0;
 677		break;
 678	}
 679
 680	if (td->tx_rate_stbc && nss == 1) {
 681		nss++;
 682		rateval |= MT_TX_RATE_STBC;
 683	}
 684
 685	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
 686		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
 687		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);
 688
 689	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
 690
 691	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
 692	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
 693		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
 694
 695	val = MT_TXD6_FIXED_BW |
 696	      FIELD_PREP(MT_TXD6_BW, bw) |
 697	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
 698	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);
 699
 700	/* for HE_SU/HE_EXT_SU PPDU
 701	 * - 1x, 2x, 4x LTF + 0.8us GI
 702	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 703	 * for HE_MU PPDU
 704	 * - 2x, 4x LTF + 0.8us GI
 705	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 706	 * for HE_TB PPDU
 707	 * - 1x, 2x LTF + 1.6us GI
 708	 * - 4x LTF + 3.2us GI
 709	 */
 710	if (mode >= MT_PHY_TYPE_HE_SU)
 711		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
 712
 713	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
 714		val |= MT_TXD6_LDPC;
 715
 716	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
 717	txwi[6] |= cpu_to_le32(val);
 718	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
 719					  phy->test.spe_idx));
 720#endif
 721}
 722
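/* Fill the TXWI via the shared connac2 helper, then let testmode
 * override the rate/bandwidth fields when it is enabled on this phy.
 */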
 723void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
 724			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
 725			   struct ieee80211_key_conf *key,
 726			   enum mt76_txq_id qid, u32 changed)
 727{
 728	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 729	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
 730	struct mt76_phy *mphy = &dev->phy;
 731
 732	if (phy_idx && dev->phys[MT_BAND1])
 733		mphy = dev->phys[MT_BAND1];
 734
 735	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
 736
 737	if (mt76_testmode_enabled(mphy))
 738		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
 739}
 740
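/* Attach a token and a firmware TXP to the frame. TX status reports
 * are requested at most once per HZ/4 per station, and only a partial
 * header (MT_CT_PARSE_LEN bytes) is handed to the firmware to parse.
 */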
 741int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 742			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
 743			  struct ieee80211_sta *sta,
 744			  struct mt76_tx_info *tx_info)
 745{
 746	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
 747	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
 748	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
 749	struct ieee80211_key_conf *key = info->control.hw_key;
 750	struct ieee80211_vif *vif = info->control.vif;
 751	struct mt76_connac_fw_txp *txp;
 752	struct mt76_txwi_cache *t;
 753	int id, i, nbuf = tx_info->nbuf - 1;
 754	u8 *txwi = (u8 *)txwi_ptr;
 755	int pid;
 756
 757	if (unlikely(tx_info->skb->len <= ETH_HLEN))
 758		return -EINVAL;
 759
 760	if (!wcid)
 761		wcid = &dev->mt76.global_wcid;
 762
 763	if (sta) {
 764		struct mt7915_sta *msta;
 765
 766		msta = (struct mt7915_sta *)sta->drv_priv;
 767
 768		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
 769			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 770			msta->jiffies = jiffies;
 771		}
 772	}
 773
 774	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
 775	t->skb = tx_info->skb;
 776
 777	id = mt76_token_consume(mdev, &t);
 778	if (id < 0)
 779		return id;
 780
 781	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 782	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
 783			      qid, 0);
 784
 785	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
 786	for (i = 0; i < nbuf; i++) {
 787		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
 788		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
 789	}
 790	txp->nbuf = nbuf;
 791
 792	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
 793
 794	if (!key)
 795		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
 796
 797	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 798	    ieee80211_is_mgmt(hdr->frame_control))
 799		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
 800
 801	if (vif) {
 802		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 803
 804		txp->bss_idx = mvif->mt76.idx;
 805	}
 806
 807	txp->token = cpu_to_le16(id);
 808	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
 809		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
 810	else
 811		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
 812	tx_info->skb = NULL;
 813
 814	/* pass partial skb header to fw */
 815	tx_info->buf[1].len = MT_CT_PARSE_LEN;
 816	tx_info->buf[1].skip_unmap = true;
 817	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
 818
 819	return 0;
 820}
 821
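/* Pre-populate a TXWI + TXP template for a WED TX buffer: cut-through
 * packet format, 802.3 header, and a single fragment starting right
 * after the descriptors. Returns the space consumed by TXWI + TXP.
 */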
 822u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
 823{
 824	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
 825	__le32 *txwi = ptr;
 826	u32 val;
 827
 828	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
 829
 830	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
 831	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
 832	txwi[0] = cpu_to_le32(val);
 833
 834	val = MT_TXD1_LONG_FORMAT |
 835	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
 836	txwi[1] = cpu_to_le32(val);
 837
 838	txp->token = cpu_to_le16(token_id);
 839	txp->nbuf = 1;
 840	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
 841
 842	return MT_TXD_SIZE + sizeof(*txp);
 843}
 844
 845static void
 846mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
 847{
 848	struct mt76_dev *mdev = &dev->mt76;
 849	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
 850
 851	/* clean DMA queues and unmap buffers first */
 852	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
 853	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 854	if (mphy_ext) {
 855		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
 856		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
 857	}
 858}
 859
 860static void
 861mt7915_mac_tx_free_done(struct mt7915_dev *dev,
 862			struct list_head *free_list, bool wake)
 863{
 864	struct sk_buff *skb, *tmp;
 865
 866	mt7915_mac_sta_poll(dev);
 867
 868	if (wake)
 869		mt76_set_tx_blocked(&dev->mt76, false);
 870
 871	mt76_worker_schedule(&dev->mt76.tx_worker);
 872
 873	list_for_each_entry_safe(skb, tmp, free_list, list) {
 874		skb_list_del_init(skb);
 875		napi_consume_skb(skb, 1);
 876	}
 877}
 878
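/* Parse a TXRX_NOTIFY (tx-free) event: each dword either announces a
 * new wcid pair (MT_TX_FREE_PAIR) or carries released MSDU token ids
 * (two 15-bit ids per dword in the v3 layout). Released tokens give
 * their txwi buffers back and may unblock the TX queues.
 */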
 879static void
 880mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
 881{
 882	struct mt76_connac_tx_free *free = data;
 883	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
 884	struct mt76_dev *mdev = &dev->mt76;
 885	struct mt76_txwi_cache *txwi;
 886	struct ieee80211_sta *sta = NULL;
 887	struct mt76_wcid *wcid = NULL;
 888	LIST_HEAD(free_list);
 889	void *end = data + len;
 890	bool v3, wake = false;
 891	u16 total, count = 0;
 892	u32 txd = le32_to_cpu(free->txd);
 893	__le32 *cur_info;
 894
 895	mt7915_mac_tx_free_prepare(dev);
 896
 897	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
 898	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
 899
 900	for (cur_info = tx_info; count < total; cur_info++) {
 901		u32 msdu, info;
 902		u8 i;
 903
 904		if (WARN_ON_ONCE((void *)cur_info >= end))
 905			return;
 906
 907		/*
 908		 * 1'b1: new wcid pair.
 909		 * 1'b0: msdu_id with the same 'wcid pair' as above.
 910		 */
 911		info = le32_to_cpu(*cur_info);
 912		if (info & MT_TX_FREE_PAIR) {
 913			struct mt7915_sta *msta;
 914			u16 idx;
 915
 916			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
 917			wcid = rcu_dereference(dev->mt76.wcid[idx]);
 918			sta = wcid_to_sta(wcid);
 919			if (!sta)
 920				continue;
 921
 922			msta = container_of(wcid, struct mt7915_sta, wcid);
 923			spin_lock_bh(&mdev->sta_poll_lock);
 924			if (list_empty(&msta->wcid.poll_list))
 925				list_add_tail(&msta->wcid.poll_list,
 926					      &mdev->sta_poll_list);
 927			spin_unlock_bh(&mdev->sta_poll_lock);
 928			continue;
 929		}
 930
 931		if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) {
 932			u32 tx_retries = 0, tx_failed = 0;
 933
 934			if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) {
 935				tx_retries =
 936					FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1;
 937				tx_failed = tx_retries +
 938					!!FIELD_GET(MT_TX_FREE_STAT_V3, info);
 939			} else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) {
 940				tx_retries =
 941					FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
 942				tx_failed = tx_retries +
 943					!!FIELD_GET(MT_TX_FREE_STAT, info);
 944			}
 945			wcid->stats.tx_retries += tx_retries;
 946			wcid->stats.tx_failed += tx_failed;
 947		}
 948
 949		if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3))
 950			continue;
 951
 952		for (i = 0; i < 1 + v3; i++) {
 953			if (v3) {
 954				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
 955				if (msdu == MT_TX_FREE_MSDU_ID_V3)
 956					continue;
 957			} else {
 958				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
 959			}
 960			count++;
 961			txwi = mt76_token_release(mdev, msdu, &wake);
 962			if (!txwi)
 963				continue;
 964
 965			mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
 966		}
 967	}
 968
 969	mt7915_mac_tx_free_done(dev, &free_list, wake);
 970}
 971
 972static void
 973mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
 974{
 975	struct mt76_connac_tx_free *free = data;
 976	__le16 *info = (__le16 *)(data + sizeof(*free));
 977	struct mt76_dev *mdev = &dev->mt76;
 978	void *end = data + len;
 979	LIST_HEAD(free_list);
 980	bool wake = false;
 981	u8 i, count;
 982
 983	mt7915_mac_tx_free_prepare(dev);
 984
 985	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
 986	if (WARN_ON_ONCE((void *)&info[count] > end))
 987		return;
 988
 989	for (i = 0; i < count; i++) {
 990		struct mt76_txwi_cache *txwi;
 991		u16 msdu = le16_to_cpu(info[i]);
 992
 993		txwi = mt76_token_release(mdev, msdu, &wake);
 994		if (!txwi)
 995			continue;
 996
 997		mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
 998	}
 999
1000	mt7915_mac_tx_free_done(dev, &free_list, wake);
1001}
1002
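/* Consume a TXS report: PIDs below MT_PACKET_ID_WED carry no skb to
 * complete and are ignored, MT_PACKET_ID_WED only updates wcid stats,
 * and anything higher completes the matching tx-status skb.
 */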
1003static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
1004{
1005	struct mt7915_sta *msta = NULL;
1006	struct mt76_wcid *wcid;
1007	__le32 *txs_data = data;
1008	u16 wcidx;
1009	u8 pid;
1010
1011	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1012	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1013
1014	if (pid < MT_PACKET_ID_WED)
1015		return;
1016
1017	if (wcidx >= mt7915_wtbl_size(dev))
1018		return;
1019
1020	rcu_read_lock();
1021
1022	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1023	if (!wcid)
1024		goto out;
1025
1026	msta = container_of(wcid, struct mt7915_sta, wcid);
1027
1028	if (pid == MT_PACKET_ID_WED)
1029		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
1030	else
1031		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
1032
1033	if (!wcid->sta)
1034		goto out;
1035
1036	spin_lock_bh(&dev->mt76.sta_poll_lock);
1037	if (list_empty(&msta->wcid.poll_list))
1038		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
1039	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1040
1041out:
1042	rcu_read_unlock();
1043}
1044
1045bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
1046{
1047	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1048	__le32 *rxd = (__le32 *)data;
1049	__le32 *end = (__le32 *)&rxd[len / 4];
1050	enum rx_pkt_type type;
1051
1052	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1053
1054	switch (type) {
1055	case PKT_TYPE_TXRX_NOTIFY:
1056		mt7915_mac_tx_free(dev, data, len);
1057		return false;
1058	case PKT_TYPE_TXRX_NOTIFY_V0:
1059		mt7915_mac_tx_free_v0(dev, data, len);
1060		return false;
1061	case PKT_TYPE_TXS:
1062		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1063			mt7915_mac_add_txs(dev, rxd);
1064		return false;
1065	case PKT_TYPE_RX_FW_MONITOR:
1066		mt7915_debugfs_rx_fw_monitor(dev, data, len);
1067		return false;
1068	default:
1069		return true;
1070	}
1071}
1072
1073void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1074			 struct sk_buff *skb, u32 *info)
1075{
1076	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1077	__le32 *rxd = (__le32 *)skb->data;
1078	__le32 *end = (__le32 *)&skb->data[skb->len];
1079	enum rx_pkt_type type;
1080
1081	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1082
1083	switch (type) {
1084	case PKT_TYPE_TXRX_NOTIFY:
1085		mt7915_mac_tx_free(dev, skb->data, skb->len);
1086		napi_consume_skb(skb, 1);
1087		break;
1088	case PKT_TYPE_TXRX_NOTIFY_V0:
1089		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
1090		napi_consume_skb(skb, 1);
1091		break;
1092	case PKT_TYPE_RX_EVENT:
1093		mt7915_mcu_rx_event(dev, skb);
1094		break;
1095	case PKT_TYPE_TXRXV:
1096		mt7915_mac_fill_rx_vector(dev, skb);
1097		break;
1098	case PKT_TYPE_TXS:
1099		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1100			mt7915_mac_add_txs(dev, rxd);
1101		dev_kfree_skb(skb);
1102		break;
1103	case PKT_TYPE_RX_FW_MONITOR:
1104		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1105		dev_kfree_skb(skb);
1106		break;
1107	case PKT_TYPE_NORMAL:
1108		if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
1109			mt76_rx(&dev->mt76, q, skb);
1110			return;
1111		}
1112		fallthrough;
1113	default:
1114		dev_kfree_skb(skb);
1115		break;
1116	}
1117}
1118
1119void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
1120{
1121	struct mt7915_dev *dev = phy->dev;
1122	u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);
1123
1124	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
1125	mt76_set(dev, reg, BIT(11) | BIT(9));
1126}
1127
1128void mt7915_mac_reset_counters(struct mt7915_phy *phy)
1129{
1130	struct mt7915_dev *dev = phy->dev;
1131	int i;
1132
1133	for (i = 0; i < 4; i++) {
1134		mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
1135		mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
1136	}
1137
1138	phy->mt76->survey_time = ktime_get_boottime();
1139	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
1140
1141	/* reset airtime counters */
1142	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
1143		 MT_WF_RMAC_MIB_RXTIME_CLR);
1144
1145	mt7915_mcu_get_chan_mib_info(phy, true);
1146}
1147
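/* Program slot time, SIFS/EIFS and the CCK/OFDM ACK timeouts for this
 * band. Each coverage-class unit stretches the PLCP/CCA timeouts by
 * 3us; with both phys active the larger coverage class wins.
 */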
1148void mt7915_mac_set_timing(struct mt7915_phy *phy)
1149{
1150	s16 coverage_class = phy->coverage_class;
1151	struct mt7915_dev *dev = phy->dev;
1152	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
1153	u32 val, reg_offset;
1154	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1155		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1156	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1157		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1158	u8 band = phy->mt76->band_idx;
1159	int eifs_ofdm = 360, sifs = 10, offset;
1160	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
1161
1162	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1163		return;
1164
1165	if (ext_phy)
1166		coverage_class = max_t(s16, dev->phy.coverage_class,
1167				       ext_phy->coverage_class);
1168
1169	mt76_set(dev, MT_ARB_SCR(band),
1170		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1171	udelay(1);
1172
1173	offset = 3 * coverage_class;
1174	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1175		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1176
1177	if (!is_mt7915(&dev->mt76)) {
1178		if (!a_band) {
1179			mt76_wr(dev, MT_TMAC_ICR1(band),
1180				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
1181			eifs_ofdm = 78;
1182		} else {
1183			eifs_ofdm = 84;
1184		}
1185	} else if (a_band) {
1186		sifs = 16;
1187	}
1188
1189	mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
1190	mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
1191	mt76_wr(dev, MT_TMAC_ICR0(band),
1192		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
1193		FIELD_PREP(MT_IFS_RIFS, 2) |
1194		FIELD_PREP(MT_IFS_SIFS, sifs) |
1195		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1196
1197	if (phy->slottime < 20 || a_band)
1198		val = MT7915_CFEND_RATE_DEFAULT;
1199	else
1200		val = MT7915_CFEND_RATE_11B;
1201
1202	mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
1203	mt76_clear(dev, MT_ARB_SCR(band),
1204		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1205}
1206
1207void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
1208{
1209	u32 reg;
1210
1211	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
1212				      MT_WF_PHY_RXTD12_MT7916(band);
1213	mt76_set(dev, reg,
1214		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
1215		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
1216
1217	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
1218				      MT_WF_PHY_RX_CTRL1_MT7916(band);
1219	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
1220}
1221
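/* Derive a noise-floor estimate from the IRPI histogram: weight each
 * bucket count by its nominal power level (dBm magnitude, negated by
 * the caller) and return the weighted average.
 */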
1222static u8
1223mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1224{
1225	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1226	struct mt7915_dev *dev = phy->dev;
1227	u32 val, sum = 0, n = 0;
1228	int nss, i;
1229
1230	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1231		u32 reg = is_mt7915(&dev->mt76) ?
1232			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
1233			MT_WF_IRPI_NSS_MT7916(idx, nss);
1234
1235		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1236			val = mt76_rr(dev, reg);
1237			sum += val * nf_power[i];
1238			n += val;
1239		}
1240	}
1241
1242	if (!n)
1243		return 0;
1244
1245	return sum / n;
1246}
1247
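/* phy->noise is a Q4 fixed-point running average updated as
 * noise += nf - (noise >> 4), i.e. an EWMA with weight 1/16; the
 * reported noise is the negated integer part.
 */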
1248void mt7915_update_channel(struct mt76_phy *mphy)
1249{
1250	struct mt7915_phy *phy = mphy->priv;
1251	struct mt76_channel_state *state = mphy->chan_state;
1252	int nf;
1253
1254	mt7915_mcu_get_chan_mib_info(phy, false);
1255
1256	nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
1257	if (!phy->noise)
1258		phy->noise = nf << 4;
1259	else if (nf)
1260		phy->noise += nf - (phy->noise >> 4);
1261
1262	state->noise = -(phy->noise >> 4);
1263}
1264
1265static bool
1266mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1267{
1268	bool ret;
1269
1270	ret = wait_event_timeout(dev->reset_wait,
1271				 (READ_ONCE(dev->recovery.state) & state),
1272				 MT7915_RESET_TIMEOUT);
1273
1274	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1275	return ret;
1276}
1277
1278static void
1279mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1280{
1281	struct ieee80211_hw *hw = priv;
1282
1283	switch (vif->type) {
1284	case NL80211_IFTYPE_MESH_POINT:
1285	case NL80211_IFTYPE_ADHOC:
1286	case NL80211_IFTYPE_AP:
1287		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
1288				      BSS_CHANGED_BEACON_ENABLED);
1289		break;
1290	default:
1291		break;
1292	}
1293}
1294
1295static void
1296mt7915_update_beacons(struct mt7915_dev *dev)
1297{
1298	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
1299
1300	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1301		IEEE80211_IFACE_ITER_RESUME_ALL,
1302		mt7915_update_vif_beacon, dev->mt76.hw);
1303
1304	if (!mphy_ext)
1305		return;
1306
1307	ieee80211_iterate_active_interfaces(mphy_ext->hw,
1308		IEEE80211_IFACE_ITER_RESUME_ALL,
1309		mt7915_update_vif_beacon, mphy_ext->hw);
1310}
1311
1312static int
1313mt7915_mac_restart(struct mt7915_dev *dev)
1314{
1315	struct mt7915_phy *phy2;
1316	struct mt76_phy *ext_phy;
1317	struct mt76_dev *mdev = &dev->mt76;
1318	int i, ret;
1319
1320	ext_phy = dev->mt76.phys[MT_BAND1];
1321	phy2 = ext_phy ? ext_phy->priv : NULL;
1322
1323	if (dev->hif2) {
1324		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
1325		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1326	}
1327
1328	if (dev_is_pci(mdev->dev)) {
1329		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
1330		if (dev->hif2) {
1331			if (is_mt7915(mdev))
1332				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
1333			else
1334				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0);
1335		}
1336	}
1337
1338	set_bit(MT76_RESET, &dev->mphy.state);
1339	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1340	wake_up(&dev->mt76.mcu.wait);
1341	if (ext_phy) {
1342		set_bit(MT76_RESET, &ext_phy->state);
1343		set_bit(MT76_MCU_RESET, &ext_phy->state);
1344	}
1345
1346	/* lock/unlock all queues to ensure that no tx is pending */
1347	mt76_txq_schedule_all(&dev->mphy);
1348	if (ext_phy)
1349		mt76_txq_schedule_all(ext_phy);
1350
1351	/* disable all tx/rx napi */
1352	mt76_worker_disable(&dev->mt76.tx_worker);
1353	mt76_for_each_q_rx(mdev, i) {
1354		if (mdev->q_rx[i].ndesc)
1355			napi_disable(&dev->mt76.napi[i]);
1356	}
1357	napi_disable(&dev->mt76.tx_napi);
1358
1359	/* token reinit */
1360	mt76_connac2_tx_token_put(&dev->mt76);
1361	idr_init(&dev->mt76.token);
1362
1363	mt7915_dma_reset(dev, true);
1364
1365	local_bh_disable();
1366	mt76_for_each_q_rx(mdev, i) {
1367		if (mdev->q_rx[i].ndesc) {
1368			napi_enable(&dev->mt76.napi[i]);
1369			napi_schedule(&dev->mt76.napi[i]);
1370		}
1371	}
1372	local_bh_enable();
1373	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1374	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
1375
1376	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
1377	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
1378
1379	if (dev->hif2) {
1380		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
1381		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1382	}
1383	if (dev_is_pci(mdev->dev)) {
1384		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
1385		if (dev->hif2) {
1386			if (is_mt7915(mdev))
1387				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
1388			else
1389				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
1390		}
1391	}
1392
1393	/* load firmware */
1394	ret = mt7915_mcu_init_firmware(dev);
1395	if (ret)
1396		goto out;
1397
1398	/* set the necessary init items */
1399	ret = mt7915_mcu_set_eeprom(dev);
1400	if (ret)
1401		goto out;
1402
1403	mt7915_mac_init(dev);
1404	mt7915_init_txpower(&dev->phy);
1405	mt7915_init_txpower(phy2);
1406	ret = mt7915_txbf_init(dev);
1407
1408	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
1409		ret = mt7915_run(dev->mphy.hw);
1410		if (ret)
1411			goto out;
1412	}
1413
1414	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
1415		ret = mt7915_run(ext_phy->hw);
1416		if (ret)
1417			goto out;
1418	}
1419
1420out:
1421	/* reset done */
1422	clear_bit(MT76_RESET, &dev->mphy.state);
1423	if (phy2)
1424		clear_bit(MT76_RESET, &phy2->mt76->state);
1425
1426	local_bh_disable();
1427	napi_enable(&dev->mt76.tx_napi);
1428	napi_schedule(&dev->mt76.tx_napi);
1429	local_bh_enable();
1430
1431	mt76_worker_enable(&dev->mt76.tx_worker);
1432
1433	return ret;
1434}
1435
1436static void
1437mt7915_mac_full_reset(struct mt7915_dev *dev)
1438{
1439	struct mt76_phy *ext_phy;
1440	int i;
1441
1442	ext_phy = dev->mt76.phys[MT_BAND1];
1443
1444	dev->recovery.hw_full_reset = true;
1445
1446	wake_up(&dev->mt76.mcu.wait);
1447	ieee80211_stop_queues(mt76_hw(dev));
1448	if (ext_phy)
1449		ieee80211_stop_queues(ext_phy->hw);
1450
1451	cancel_delayed_work_sync(&dev->mphy.mac_work);
1452	if (ext_phy)
1453		cancel_delayed_work_sync(&ext_phy->mac_work);
1454
1455	mutex_lock(&dev->mt76.mutex);
1456	for (i = 0; i < 10; i++) {
1457		if (!mt7915_mac_restart(dev))
1458			break;
1459	}
1460	mutex_unlock(&dev->mt76.mutex);
1461
1462	if (i == 10)
1463		dev_err(dev->mt76.dev, "chip full reset failed\n");
1464
1465	ieee80211_restart_hw(mt76_hw(dev));
1466	if (ext_phy)
1467		ieee80211_restart_hw(ext_phy->hw);
1468
1469	ieee80211_wake_queues(mt76_hw(dev));
1470	if (ext_phy)
1471		ieee80211_wake_queues(ext_phy->hw);
1472
1473	dev->recovery.hw_full_reset = false;
1474	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1475				     MT7915_WATCHDOG_TIME);
1476	if (ext_phy)
1477		ieee80211_queue_delayed_work(ext_phy->hw,
1478					     &ext_phy->mac_work,
1479					     MT7915_WATCHDOG_TIME);
1480}
1481
1482/* system error recovery */
1483void mt7915_mac_reset_work(struct work_struct *work)
1484{
1485	struct mt7915_phy *phy2;
1486	struct mt76_phy *ext_phy;
1487	struct mt7915_dev *dev;
1488	int i;
1489
1490	dev = container_of(work, struct mt7915_dev, reset_work);
1491	ext_phy = dev->mt76.phys[MT_BAND1];
1492	phy2 = ext_phy ? ext_phy->priv : NULL;
1493
1494	/* chip full reset */
1495	if (dev->recovery.restart) {
1496		/* disable WA/WM WDT */
1497		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
1498			   MT_MCU_CMD_WDT_MASK);
1499
1500		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
1501			dev->recovery.wa_reset_count++;
1502		else
1503			dev->recovery.wm_reset_count++;
1504
1505		mt7915_mac_full_reset(dev);
1506
1507		/* enable mcu irq */
1508		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
1509		mt7915_irq_disable(dev, 0);
1510
1511		/* enable WA/WM WDT */
1512		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
1513
1514		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
1515		dev->recovery.restart = false;
1516		return;
1517	}
1518
1519	/* chip partial reset */
1520	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
1521		return;
1522
1523	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
1524		mtk_wed_device_stop(&dev->mt76.mmio.wed);
1525		if (!is_mt798x(&dev->mt76))
1526			mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
1527	}
1528
1529	ieee80211_stop_queues(mt76_hw(dev));
1530	if (ext_phy)
1531		ieee80211_stop_queues(ext_phy->hw);
1532
1533	set_bit(MT76_RESET, &dev->mphy.state);
1534	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1535	wake_up(&dev->mt76.mcu.wait);
1536	cancel_delayed_work_sync(&dev->mphy.mac_work);
1537	if (phy2) {
1538		set_bit(MT76_RESET, &phy2->mt76->state);
1539		cancel_delayed_work_sync(&phy2->mt76->mac_work);
1540	}
1541	mt76_worker_disable(&dev->mt76.tx_worker);
1542	mt76_for_each_q_rx(&dev->mt76, i)
1543		napi_disable(&dev->mt76.napi[i]);
1544	napi_disable(&dev->mt76.tx_napi);
1545
1546	mutex_lock(&dev->mt76.mutex);
1547
1548	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
1549
1550	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
1551		mt7915_dma_reset(dev, false);
1552
1553		mt76_connac2_tx_token_put(&dev->mt76);
1554		idr_init(&dev->mt76.token);
1555
1556		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
1557		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
1558	}
1559
1560	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
1561	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
1562
1563	/* enable DMA Tx/Rx and interrupt */
1564	mt7915_dma_start(dev, false, false);
1565
1566	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1567	clear_bit(MT76_RESET, &dev->mphy.state);
1568	if (phy2)
1569		clear_bit(MT76_RESET, &phy2->mt76->state);
1570
1571	local_bh_disable();
1572	mt76_for_each_q_rx(&dev->mt76, i) {
1573		napi_enable(&dev->mt76.napi[i]);
1574		napi_schedule(&dev->mt76.napi[i]);
1575	}
1576	local_bh_enable();
1577
1578	tasklet_schedule(&dev->mt76.irq_tasklet);
1579
1580	mt76_worker_enable(&dev->mt76.tx_worker);
1581
1582	local_bh_disable();
1583	napi_enable(&dev->mt76.tx_napi);
1584	napi_schedule(&dev->mt76.tx_napi);
1585	local_bh_enable();
1586
1587	ieee80211_wake_queues(mt76_hw(dev));
1588	if (ext_phy)
1589		ieee80211_wake_queues(ext_phy->hw);
1590
1591	mutex_unlock(&dev->mt76.mutex);
1592
1593	mt7915_update_beacons(dev);
1594
1595	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1596				     MT7915_WATCHDOG_TIME);
1597	if (phy2)
1598		ieee80211_queue_delayed_work(ext_phy->hw,
1599					     &phy2->mt76->mac_work,
1600					     MT7915_WATCHDOG_TIME);
1601}
1602
1603/* firmware coredump */
1604void mt7915_mac_dump_work(struct work_struct *work)
1605{
1606	const struct mt7915_mem_region *mem_region;
1607	struct mt7915_crash_data *crash_data;
1608	struct mt7915_dev *dev;
1609	struct mt7915_mem_hdr *hdr;
1610	size_t buf_len;
1611	int i;
1612	u32 num;
1613	u8 *buf;
1614
1615	dev = container_of(work, struct mt7915_dev, dump_work);
1616
1617	mutex_lock(&dev->dump_mutex);
1618
1619	crash_data = mt7915_coredump_new(dev);
1620	if (!crash_data) {
1621		mutex_unlock(&dev->dump_mutex);
1622		goto skip_coredump;
1623	}
1624
1625	mem_region = mt7915_coredump_get_mem_layout(dev, &num);
1626	if (!mem_region || !crash_data->memdump_buf_len) {
1627		mutex_unlock(&dev->dump_mutex);
1628		goto skip_memdump;
1629	}
1630
1631	buf = crash_data->memdump_buf;
1632	buf_len = crash_data->memdump_buf_len;
1633
1634	/* dumping memory content... */
1635	memset(buf, 0, buf_len);
1636	for (i = 0; i < num; i++) {
1637		if (mem_region->len > buf_len) {
1638			dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
1639				 mem_region->name,
1640				 (unsigned long)mem_region->len);
1641			break;
1642		}
1643
1644		/* reserve space for the header */
1645		hdr = (void *)buf;
1646		buf += sizeof(*hdr);
1647		buf_len -= sizeof(*hdr);
1648
1649		mt7915_memcpy_fromio(dev, buf, mem_region->start,
1650				     mem_region->len);
1651
1652		hdr->start = mem_region->start;
1653		hdr->len = mem_region->len;
1654
1655		if (!mem_region->len)
1656			/* note: the header remains, just with zero length */
1657			break;
1658
1659		buf += mem_region->len;
1660		buf_len -= mem_region->len;
1661
1662		mem_region++;
1663	}
1664
1665	mutex_unlock(&dev->dump_mutex);
1666
1667skip_memdump:
1668	mt7915_coredump_submit(dev);
1669skip_coredump:
1670	queue_work(dev->mt76.wq, &dev->reset_work);
1671}
1672
1673void mt7915_reset(struct mt7915_dev *dev)
1674{
1675	if (!dev->recovery.hw_init_done)
1676		return;
1677
1678	if (dev->recovery.hw_full_reset)
1679		return;
1680
1681	/* wm/wa exception: do full recovery */
1682	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
1683		dev->recovery.restart = true;
1684		dev_info(dev->mt76.dev,
1685			 "%s indicated firmware crash, attempting recovery\n",
1686			 wiphy_name(dev->mt76.hw->wiphy));
1687
1688		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
1689		queue_work(dev->mt76.wq, &dev->dump_work);
1690		return;
1691	}
1692
1693	queue_work(dev->mt76.wq, &dev->reset_work);
1694	wake_up(&dev->reset_wait);
1695}
1696
1697void mt7915_mac_update_stats(struct mt7915_phy *phy)
1698{
1699	struct mt76_mib_stats *mib = &phy->mib;
1700	struct mt7915_dev *dev = phy->dev;
1701	int i, aggr0 = 0, aggr1, cnt;
1702	u8 band = phy->mt76->band_idx;
1703	u32 val;
1704
1705	cnt = mt76_rr(dev, MT_MIB_SDR3(band));
1706	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
1707		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
1708		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
1709
1710	cnt = mt76_rr(dev, MT_MIB_SDR4(band));
1711	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
1712
1713	cnt = mt76_rr(dev, MT_MIB_SDR5(band));
1714	mib->rx_mpdu_cnt += cnt;
1715
1716	cnt = mt76_rr(dev, MT_MIB_SDR6(band));
1717	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
1718
1719	cnt = mt76_rr(dev, MT_MIB_SDR7(band));
1720	mib->rx_vector_mismatch_cnt +=
1721		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
1722
1723	cnt = mt76_rr(dev, MT_MIB_SDR8(band));
1724	mib->rx_delimiter_fail_cnt +=
1725		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
1726
1727	cnt = mt76_rr(dev, MT_MIB_SDR10(band));
1728	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
1729		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
1730		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
1731
1732	cnt = mt76_rr(dev, MT_MIB_SDR11(band));
1733	mib->rx_len_mismatch_cnt +=
1734		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
1735
1736	cnt = mt76_rr(dev, MT_MIB_SDR12(band));
1737	mib->tx_ampdu_cnt += cnt;
1738
1739	cnt = mt76_rr(dev, MT_MIB_SDR13(band));
1740	mib->tx_stop_q_empty_cnt +=
1741		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
1742
1743	cnt = mt76_rr(dev, MT_MIB_SDR14(band));
1744	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
1745		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
1746		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
1747
1748	cnt = mt76_rr(dev, MT_MIB_SDR15(band));
1749	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
1750		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
1751		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
1752
1753	cnt = mt76_rr(dev, MT_MIB_SDR16(band));
1754	mib->primary_cca_busy_time +=
1755		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
1756
1757	cnt = mt76_rr(dev, MT_MIB_SDR17(band));
1758	mib->secondary_cca_busy_time +=
1759		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
1760
1761	cnt = mt76_rr(dev, MT_MIB_SDR18(band));
1762	mib->primary_energy_detect_time +=
1763		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
1764
1765	cnt = mt76_rr(dev, MT_MIB_SDR19(band));
1766	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
1767
1768	cnt = mt76_rr(dev, MT_MIB_SDR20(band));
1769	mib->ofdm_mdrdy_time +=
1770		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
1771
1772	cnt = mt76_rr(dev, MT_MIB_SDR21(band));
1773	mib->green_mdrdy_time +=
1774		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
1775
1776	cnt = mt76_rr(dev, MT_MIB_SDR22(band));
1777	mib->rx_ampdu_cnt += cnt;
1778
1779	cnt = mt76_rr(dev, MT_MIB_SDR23(band));
1780	mib->rx_ampdu_bytes_cnt += cnt;
1781
1782	cnt = mt76_rr(dev, MT_MIB_SDR24(band));
1783	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
1784		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
1785		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
1786
1787	cnt = mt76_rr(dev, MT_MIB_SDR25(band));
1788	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
1789
1790	cnt = mt76_rr(dev, MT_MIB_SDR27(band));
1791	mib->tx_rwp_fail_cnt +=
1792		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
1793
1794	cnt = mt76_rr(dev, MT_MIB_SDR28(band));
1795	mib->tx_rwp_need_cnt +=
1796		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
1797
1798	cnt = mt76_rr(dev, MT_MIB_SDR29(band));
1799	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
1800		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
1801		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
1802
1803	cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
1804	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
1805		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
1806		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
1807
1808	cnt = mt76_rr(dev, MT_MIB_SDR31(band));
1809	mib->rx_ba_cnt += cnt;
1810
1811	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
1812	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
1813
1814	cnt = mt76_rr(dev, MT_MIB_DR8(band));
1815	mib->tx_mu_mpdu_cnt += cnt;
1816
1817	cnt = mt76_rr(dev, MT_MIB_DR9(band));
1818	mib->tx_mu_acked_mpdu_cnt += cnt;
1819
1820	cnt = mt76_rr(dev, MT_MIB_DR11(band));
1821	mib->tx_su_acked_mpdu_cnt += cnt;
1822
1823	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
1824	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
1825	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
1826	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
1827
1828	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
1829		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
1830		mib->tx_amsdu[i] += cnt;
1831		mib->tx_amsdu_cnt += cnt;
1832	}
1833
1834	if (is_mt7915(&dev->mt76)) {
1835		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
1836			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
1837			mib->ba_miss_cnt +=
1838				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
1839			mib->ack_fail_cnt +=
1840				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
1841
1842			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
1843			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1844			mib->rts_retries_cnt +=
1845				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1846
1847			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1848			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
1849			phy->mt76->aggr_stats[aggr0++] += val >> 16;
1850
1851			val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
1852			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
1853			phy->mt76->aggr_stats[aggr1++] += val >> 16;
1854		}
1855
1856		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1857		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1858
1859		cnt = mt76_rr(dev, MT_MIB_SDR33(band));
1860		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
1861
1862		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
1863		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
1864		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
1865
1866		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
1867		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
1868		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
1869
1870		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
1871		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
1872		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
1873		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
1874		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
1875	} else {
1876		for (i = 0; i < 2; i++) {
1877			/* rts count */
1878			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
1879			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
1880			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
1881
1882			/* rts retry count */
1883			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
1884			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
1885			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
1886
1887			/* ba miss count */
1888			val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
1889			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
1890			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
1891
1892			/* ack fail count */
1893			val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
1894			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
1895			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
1896		}
1897
1898		for (i = 0; i < 8; i++) {
1899			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1900			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
1901			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
1902		}
1903
1904		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1905		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1906		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1907		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1908		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1909
1910		cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
1911		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
1912
1913		cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
1914		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
1915
1916		cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
1917		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1918		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1919		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1920		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1921
1922		cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
1923		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1924		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1925	}
1926}
1927
1928static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1929{
1930	struct mt7915_dev *dev = phy->dev;
1931	u32 trb;
1932
1933	if (!phy->omac_mask)
1934		return;
1935
1936	/* In rare cases, the TRB pointers might get out of sync, causing the
1937	 * RMAC to stop Rx; check the status periodically to see if the TRB
1938	 * hardware requires minimal recovery.
1939	 */
1940	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));
1941
1942	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
1943	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
1944	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
1945	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
1946	    trb == phy->trb_ts)
1947		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
1948				   phy->mt76->band_idx);
1949
1950	phy->trb_ts = trb;
1951}
1952
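/* Deferred work: drain the queued per-station rate-control updates and push
 * the new rate and SMPS settings to the MCU outside of atomic context.
 */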
1953void mt7915_mac_sta_rc_work(struct work_struct *work)
1954{
1955	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
1956	struct ieee80211_sta *sta;
1957	struct ieee80211_vif *vif;
1958	struct mt7915_sta *msta;
1959	u32 changed;
1960	LIST_HEAD(list);
1961
1962	spin_lock_bh(&dev->mt76.sta_poll_lock);
1963	list_splice_init(&dev->sta_rc_list, &list);
1964
1965	while (!list_empty(&list)) {
1966		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
1967		list_del_init(&msta->rc_list);
1968		changed = msta->changed;
1969		msta->changed = 0;
1970		spin_unlock_bh(&dev->mt76.sta_poll_lock);
1971
1972		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
1973		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
1974
1975		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
1976			       IEEE80211_RC_NSS_CHANGED |
1977			       IEEE80211_RC_BW_CHANGED))
1978			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
1979
1980		if (changed & IEEE80211_RC_SMPS_CHANGED)
1981			mt7915_mcu_add_smps(dev, vif, sta);
1982
1983		spin_lock_bh(&dev->mt76.sta_poll_lock);
1984	}
1985
1986	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1987}
1988
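/* Periodic MAC housekeeping: refresh channel survey data on every run, and
 * update MIB stats and run the TRB severe check on every fifth invocation.
 */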
1989void mt7915_mac_work(struct work_struct *work)
1990{
1991	struct mt7915_phy *phy;
1992	struct mt76_phy *mphy;
1993
1994	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1995					       mac_work.work);
1996	phy = mphy->priv;
1997
1998	mutex_lock(&mphy->dev->mutex);
1999
2000	mt76_update_survey(mphy);
2001	if (++mphy->mac_work_count == 5) {
2002		mphy->mac_work_count = 0;
2003
2004		mt7915_mac_update_stats(phy);
2005		mt7915_mac_severe_check(phy);
2006
2007		if (phy->dev->muru_debug)
2008			mt7915_mcu_muru_debug_get(phy);
2009	}
2010
2011	mutex_unlock(&mphy->dev->mutex);
2012
2013	mt76_tx_status_check(mphy->dev, false);
2014
2015	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2016				     MT7915_WATCHDOG_TIME);
2017}
2018
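/* Stop any radar-detection (RDD) instance still running on either chain. */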
2019static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
2020{
2021	struct mt7915_dev *dev = phy->dev;
2022
2023	if (phy->rdd_state & BIT(0))
2024		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
2025					MT_RX_SEL0, 0);
2026	if (phy->rdd_state & BIT(1))
2027		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
2028					MT_RX_SEL0, 0);
2029}
2030
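/* Map the regulatory region to the firmware RDD region code, then start
 * radar detection on the given chain and switch it to detection mode.
 */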
2031static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
2032{
2033	int err, region;
2034
2035	switch (dev->mt76.region) {
2036	case NL80211_DFS_ETSI:
2037		region = 0;
2038		break;
2039	case NL80211_DFS_JP:
2040		region = 2;
2041		break;
2042	case NL80211_DFS_FCC:
2043	default:
2044		region = 1;
2045		break;
2046	}
2047
2048	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
2049				      MT_RX_SEL0, region);
2050	if (err < 0)
2051		return err;
2052
2053	if (is_mt7915(&dev->mt76)) {
2054		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
2055					      0, dev->dbdc_support ? 2 : 0);
2056		if (err < 0)
2057			return err;
2058	}
2059
2060	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
2061				       MT_RX_SEL0, 1);
2062}
2063
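/* Begin the CAC period and start RDD on the band's primary chain; mt7915
 * additionally needs the second chain for 160 MHz and 80+80 MHz channels.
 */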
2064static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
2065{
2066	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2067	struct mt7915_dev *dev = phy->dev;
2068	int err;
2069
2070	/* start CAC */
2071	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
2072				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2073	if (err < 0)
2074		return err;
2075
2076	err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
2077	if (err < 0)
2078		return err;
2079
2080	phy->rdd_state |= BIT(phy->mt76->band_idx);
2081
2082	if (!is_mt7915(&dev->mt76))
2083		return 0;
2084
2085	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2086	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
2087		err = mt7915_dfs_start_rdd(dev, 1);
2088		if (err < 0)
2089			return err;
2090
2091		phy->rdd_state |= BIT(1);
2092	}
2093
2094	return 0;
2095}
2096
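/* Program the region-specific radar patterns and pulse thresholds into the
 * firmware radar detector.
 */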
2097static int
2098mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
2099{
2100	const struct mt7915_dfs_radar_spec *radar_specs;
2101	struct mt7915_dev *dev = phy->dev;
2102	int err, i;
2103
2104	switch (dev->mt76.region) {
2105	case NL80211_DFS_FCC:
2106		radar_specs = &fcc_radar_specs;
2107		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
2108		if (err < 0)
2109			return err;
2110		break;
2111	case NL80211_DFS_ETSI:
2112		radar_specs = &etsi_radar_specs;
2113		break;
2114	case NL80211_DFS_JP:
2115		radar_specs = &jp_radar_specs;
2116		break;
2117	default:
2118		return -EINVAL;
2119	}
2120
2121	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2122		err = mt7915_mcu_set_radar_th(dev, i,
2123					      &radar_specs->radar_pattern[i]);
2124		if (err < 0)
2125			return err;
2126	}
2127
2128	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2129}
2130
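/* Drive the DFS state machine: (re)program the radar specs and start CAC
 * when detection becomes required, promote CAC to active once it completes,
 * or fall back to normal operation when DFS is no longer needed.
 */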
2131int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
2132{
2133	struct mt7915_dev *dev = phy->dev;
2134	enum mt76_dfs_state dfs_state, prev_state;
2135	int err;
2136
2137	prev_state = phy->mt76->dfs_state;
2138	dfs_state = mt76_phy_dfs_state(phy->mt76);
2139
2140	if (prev_state == dfs_state)
2141		return 0;
2142
2143	if (prev_state == MT_DFS_STATE_UNKNOWN)
2144		mt7915_dfs_stop_radar_detector(phy);
2145
2146	if (dfs_state == MT_DFS_STATE_DISABLED)
2147		goto stop;
2148
2149	if (prev_state <= MT_DFS_STATE_DISABLED) {
2150		err = mt7915_dfs_init_radar_specs(phy);
2151		if (err < 0)
2152			return err;
2153
2154		err = mt7915_dfs_start_radar_detector(phy);
2155		if (err < 0)
2156			return err;
2157
2158		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
2159	}
2160
2161	if (dfs_state == MT_DFS_STATE_CAC)
2162		return 0;
2163
2164	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
2165				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2166	if (err < 0) {
2167		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
2168		return err;
2169	}
2170
2171	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
2172	return 0;
2173
2174stop:
2175	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
2176				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2177	if (err < 0)
2178		return err;
2179
2180	if (is_mt7915(&dev->mt76)) {
2181		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
2182					      phy->mt76->band_idx, 0,
2183					      dev->dbdc_support ? 2 : 0);
2184		if (err < 0)
2185			return err;
2186	}
2187
2188	mt7915_dfs_stop_radar_detector(phy);
2189	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
2190
2191	return 0;
2192}
2193
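/* TWT wake durations are negotiated in units of 256us; shift to convert to
 * microseconds for TSF arithmetic.
 */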
2194static int
2195mt7915_mac_twt_duration_align(int duration)
2196{
2197	return duration << 8;
2198}
2199
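/* Insert a new flow into the TSF-ordered schedule list at the first gap
 * large enough to hold its service period, and return its start TSF.
 */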
2200static u64
2201mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
2202			      struct mt7915_twt_flow *flow)
2203{
2204	struct mt7915_twt_flow *iter, *iter_next;
2205	u32 duration = flow->duration << 8;
2206	u64 start_tsf;
2207
2208	iter = list_first_entry_or_null(&dev->twt_list,
2209					struct mt7915_twt_flow, list);
2210	if (!iter || !iter->sched || iter->start_tsf > duration) {
2211		/* add flow as first entry in the list */
2212		list_add(&flow->list, &dev->twt_list);
2213		return 0;
2214	}
2215
2216	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
2217		start_tsf = iter->start_tsf +
2218			    mt7915_mac_twt_duration_align(iter->duration);
2219		if (list_is_last(&iter->list, &dev->twt_list))
2220			break;
2221
2222		if (!iter_next->sched ||
2223		    iter_next->start_tsf > start_tsf + duration) {
2224			list_add(&flow->list, &iter->list);
2225			goto out;
2226		}
2227	}
2228
2229	/* add flow as last entry in the list */
2230	list_add_tail(&flow->list, &dev->twt_list);
2231out:
2232	return start_tsf;
2233}
2234
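/* Validate a TWT setup request: only individual, implicit agreements with
 * 256us wake-duration units are supported, and the wake interval must be at
 * least as long as the wake duration.
 */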
2235static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2236{
2237	struct ieee80211_twt_params *twt_agrt;
2238	u64 interval, duration;
2239	u16 mantissa;
2240	u8 exp;
2241
2242	/* only individual agreement supported */
2243	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2244		return -EOPNOTSUPP;
2245
2246	/* only 256us unit supported */
2247	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
2248		return -EOPNOTSUPP;
2249
2250	twt_agrt = (struct ieee80211_twt_params *)twt->params;
2251
2252	/* explicit agreement not supported */
2253	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2254		return -EOPNOTSUPP;
2255
2256	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2257			le16_to_cpu(twt_agrt->req_type));
2258	mantissa = le16_to_cpu(twt_agrt->mantissa);
2259	duration = twt_agrt->min_twt_dur << 8;
2260
2261	interval = (u64)mantissa << exp;
2262	if (interval < duration)
2263		return -EOPNOTSUPP;
2264
2265	return 0;
2266}
2267
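/* Return true if the station already has a flow with identical TWT
 * parameters, so that a duplicate agreement is not installed.
 */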
2268static bool
2269mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
2270			   struct ieee80211_twt_params *twt_agrt)
2271{
2272	u16 type = le16_to_cpu(twt_agrt->req_type);
2273	u8 exp;
2274	int i;
2275
2276	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
2277	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
2278		struct mt7915_twt_flow *f;
2279
2280		if (!(msta->twt.flowid_mask & BIT(i)))
2281			continue;
2282
2283		f = &msta->twt.flow[i];
2284		if (f->duration == twt_agrt->min_twt_dur &&
2285		    f->mantissa == twt_agrt->mantissa &&
2286		    f->exp == exp &&
2287		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
2288		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
2289		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
2290			return true;
2291	}
2292
2293	return false;
2294}
2295
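/* Handle a TWT setup request from a station: allocate a flow id and table
 * entry, schedule the flow, push the agreement to the MCU, and encode the
 * accept/dictate/reject response back into the setup frame.
 */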
2296void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
2297			      struct ieee80211_sta *sta,
2298			      struct ieee80211_twt_setup *twt)
2299{
2300	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
2301	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
2302	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
2303	u16 req_type = le16_to_cpu(twt_agrt->req_type);
2304	enum ieee80211_twt_setup_cmd sta_setup_cmd;
2305	struct mt7915_dev *dev = mt7915_hw_dev(hw);
2306	struct mt7915_twt_flow *flow;
2307	int flowid, table_id;
2308	u8 exp;
2309
2310	if (mt7915_mac_check_twt_req(twt))
2311		goto out;
2312
2313	mutex_lock(&dev->mt76.mutex);
2314
2315	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
2316		goto unlock;
2317
2318	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
2319		goto unlock;
2320
2321	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
2322		setup_cmd = TWT_SETUP_CMD_DICTATE;
2323		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
2324		goto unlock;
2325	}
2326
2327	flowid = ffs(~msta->twt.flowid_mask) - 1;
2328	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
2329	twt_agrt->req_type |= le16_encode_bits(flowid,
2330					       IEEE80211_TWT_REQTYPE_FLOWID);
2331
2332	table_id = ffs(~dev->twt.table_mask) - 1;
2333	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
2334	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
2335
2336	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
2337		goto unlock;
2338
2339	flow = &msta->twt.flow[flowid];
2340	memset(flow, 0, sizeof(*flow));
2341	INIT_LIST_HEAD(&flow->list);
2342	flow->wcid = msta->wcid.idx;
2343	flow->table_id = table_id;
2344	flow->id = flowid;
2345	flow->duration = twt_agrt->min_twt_dur;
2346	flow->mantissa = twt_agrt->mantissa;
2347	flow->exp = exp;
2348	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
2349	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
2350	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
2351
2352	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
2353	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
2354		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
2355		u64 flow_tsf, curr_tsf;
2356		u32 rem;
2357
2358		flow->sched = true;
2359		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
2360		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
2361		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
2362		flow_tsf = curr_tsf + interval - rem;
2363		twt_agrt->twt = cpu_to_le64(flow_tsf);
2364	} else {
2365		list_add_tail(&flow->list, &dev->twt_list);
2366	}
2367	flow->tsf = le64_to_cpu(twt_agrt->twt);
2368
2369	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
2370		goto unlock;
2371
2372	setup_cmd = TWT_SETUP_CMD_ACCEPT;
2373	dev->twt.table_mask |= BIT(table_id);
2374	msta->twt.flowid_mask |= BIT(flowid);
2375	dev->twt.n_agrt++;
2376
2377unlock:
2378	mutex_unlock(&dev->mt76.mutex);
2379out:
2380	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
2381	twt_agrt->req_type |=
2382		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
2383	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
2384		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
2385}
2386
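/* Tear down a single TWT flow: remove the agreement from the MCU and
 * release the associated flow id and table entry.
 */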
2387void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
2388				  struct mt7915_sta *msta,
2389				  u8 flowid)
2390{
2391	struct mt7915_twt_flow *flow;
2392
2393	lockdep_assert_held(&dev->mt76.mutex);
2394
2395	if (flowid >= ARRAY_SIZE(msta->twt.flow))
2396		return;
2397
2398	if (!(msta->twt.flowid_mask & BIT(flowid)))
2399		return;
2400
2401	flow = &msta->twt.flow[flowid];
2402	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
2403				       MCU_TWT_AGRT_DELETE))
2404		return;
2405
2406	list_del_init(&flow->list);
2407	msta->twt.flowid_mask &= ~BIT(flowid);
2408	dev->twt.table_mask &= ~BIT(flow->table_id);
2409	dev->twt.n_agrt--;
2410}
 100	};
 101	struct ieee80211_sta *sta;
 102	struct mt7915_sta *msta;
 103	struct rate_info *rate;
 104	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
 105	LIST_HEAD(sta_poll_list);
 106	int i;
 107
 108	spin_lock_bh(&dev->mt76.sta_poll_lock);
 109	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
 110	spin_unlock_bh(&dev->mt76.sta_poll_lock);
 111
 112	rcu_read_lock();
 113
 114	while (true) {
 115		bool clear = false;
 116		u32 addr, val;
 117		u16 idx;
 118		s8 rssi[4];
 119		u8 bw;
 120
 121		spin_lock_bh(&dev->mt76.sta_poll_lock);
 122		if (list_empty(&sta_poll_list)) {
 123			spin_unlock_bh(&dev->mt76.sta_poll_lock);
 124			break;
 125		}
 126		msta = list_first_entry(&sta_poll_list,
 127					struct mt7915_sta, wcid.poll_list);
 128		list_del_init(&msta->wcid.poll_list);
 129		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 130
 131		idx = msta->wcid.idx;
 132
 133		/* refresh peer's airtime reporting */
 134		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
 135
 136		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 137			u32 tx_last = msta->airtime_ac[i];
 138			u32 rx_last = msta->airtime_ac[i + 4];
 139
 140			msta->airtime_ac[i] = mt76_rr(dev, addr);
 141			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
 142
 143			if (msta->airtime_ac[i] <= tx_last)
 144				tx_time[i] = 0;
 145			else
 146				tx_time[i] = msta->airtime_ac[i] - tx_last;
 147
 148			if (msta->airtime_ac[i + 4] <= rx_last)
 149				rx_time[i] = 0;
 150			else
 151				rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
 152
 153			if ((tx_last | rx_last) & BIT(30))
 154				clear = true;
 155
 156			addr += 8;
 157		}
 158
 159		if (clear) {
 160			mt7915_mac_wtbl_update(dev, idx,
 161					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 162			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
 163		}
 164
 165		if (!msta->wcid.sta)
 166			continue;
 167
 168		sta = container_of((void *)msta, struct ieee80211_sta,
 169				   drv_priv);
 170		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 171			u8 queue = mt76_connac_lmac_mapping(i);
 172			u32 tx_cur = tx_time[queue];
 173			u32 rx_cur = rx_time[queue];
 174			u8 tid = ac_to_tid[i];
 175
 176			if (!tx_cur && !rx_cur)
 177				continue;
 178
 179			ieee80211_sta_register_airtime(sta, tid, tx_cur,
 180						       rx_cur);
 181		}
 182
 183		/*
 184		 * We don't support reading GI info from txs packets.
 185		 * For accurate tx status reporting and AQL improvement,
 186		 * we need to make sure that flags match so polling GI
 187		 * from per-sta counters directly.
 188		 */
 189		rate = &msta->wcid.rate;
 190		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
 191		val = mt76_rr(dev, addr);
 192
 193		switch (rate->bw) {
 194		case RATE_INFO_BW_160:
 195			bw = IEEE80211_STA_RX_BW_160;
 196			break;
 197		case RATE_INFO_BW_80:
 198			bw = IEEE80211_STA_RX_BW_80;
 199			break;
 200		case RATE_INFO_BW_40:
 201			bw = IEEE80211_STA_RX_BW_40;
 202			break;
 203		default:
 204			bw = IEEE80211_STA_RX_BW_20;
 205			break;
 206		}
 207
 208		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
 209			u8 offs = 24 + 2 * bw;
 210
 211			rate->he_gi = (val & (0x3 << offs)) >> offs;
 212		} else if (rate->flags &
 213			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
 214			if (val & BIT(12 + bw))
 215				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
 216			else
 217				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
 218		}
 219
 220		/* get signal strength of resp frames (CTS/BA/ACK) */
 221		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
 222		val = mt76_rr(dev, addr);
 223
 224		rssi[0] = to_rssi(GENMASK(7, 0), val);
 225		rssi[1] = to_rssi(GENMASK(15, 8), val);
 226		rssi[2] = to_rssi(GENMASK(23, 16), val);
 227		rssi[3] = to_rssi(GENMASK(31, 14), val);
 228
 229		msta->ack_signal =
 230			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
 231
 232		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
 233	}
 234
 235	rcu_read_unlock();
 236}
 237
 238void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
 239			      struct ieee80211_vif *vif, bool enable)
 240{
 241	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 242	u32 addr;
 243
 244	addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
 245	if (enable)
 246		mt76_set(dev, addr, BIT(5));
 247	else
 248		mt76_clear(dev, addr, BIT(5));
 249}
 250
 251static void
 252mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
 253		     struct mt7915_sta *msta, struct sk_buff *skb,
 254		     u32 info)
 255{
 256	struct ieee80211_vif *vif;
 257	struct wireless_dev *wdev;
 258
 259	if (!msta || !msta->vif)
 260		return;
 261
 262	if (!mt76_queue_is_wed_rx(q))
 263		return;
 264
 265	if (!(info & MT_DMA_INFO_PPE_VLD))
 266		return;
 267
 268	vif = container_of((void *)msta->vif, struct ieee80211_vif,
 269			   drv_priv);
 270	wdev = ieee80211_vif_to_wdev(vif);
 271	skb->dev = wdev->netdev;
 272
 273	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
 274				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
 275				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
 276}
 277
 278static int
 279mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
 280		   enum mt76_rxq_id q, u32 *info)
 281{
 282	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 283	struct mt76_phy *mphy = &dev->mt76.phy;
 284	struct mt7915_phy *phy = &dev->phy;
 285	struct ieee80211_supported_band *sband;
 286	__le32 *rxd = (__le32 *)skb->data;
 287	__le32 *rxv = NULL;
 288	u32 rxd0 = le32_to_cpu(rxd[0]);
 289	u32 rxd1 = le32_to_cpu(rxd[1]);
 290	u32 rxd2 = le32_to_cpu(rxd[2]);
 291	u32 rxd3 = le32_to_cpu(rxd[3]);
 292	u32 rxd4 = le32_to_cpu(rxd[4]);
 293	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
 294	bool unicast, insert_ccmp_hdr = false;
 295	u8 remove_pad, amsdu_info;
 296	u8 mode = 0, qos_ctl = 0;
 297	struct mt7915_sta *msta = NULL;
 298	u32 csum_status = *(u32 *)skb->cb;
 299	bool hdr_trans;
 300	u16 hdr_gap;
 301	u16 seq_ctrl = 0;
 302	__le16 fc = 0;
 303	int idx;
 304
 305	memset(status, 0, sizeof(*status));
 306
 307	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
 308		mphy = dev->mt76.phys[MT_BAND1];
 309		if (!mphy)
 310			return -EINVAL;
 311
 312		phy = mphy->priv;
 313		status->phy_idx = 1;
 314	}
 315
 316	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
 317		return -EINVAL;
 318
 319	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
 320		return -EINVAL;
 321
 322	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
 323	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
 324		return -EINVAL;
 325
 326	/* ICV error or CCMP/BIP/WPI MIC error */
 327	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
 328		status->flag |= RX_FLAG_ONLY_MONITOR;
 329
 330	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
 331	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
 332	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
 333
 334	if (status->wcid) {
 335		msta = container_of(status->wcid, struct mt7915_sta, wcid);
 336		spin_lock_bh(&dev->mt76.sta_poll_lock);
 337		if (list_empty(&msta->wcid.poll_list))
 338			list_add_tail(&msta->wcid.poll_list,
 339				      &dev->mt76.sta_poll_list);
 340		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 341	}
 342
 343	status->freq = mphy->chandef.chan->center_freq;
 344	status->band = mphy->chandef.chan->band;
 345	if (status->band == NL80211_BAND_5GHZ)
 346		sband = &mphy->sband_5g.sband;
 347	else if (status->band == NL80211_BAND_6GHZ)
 348		sband = &mphy->sband_6g.sband;
 349	else
 350		sband = &mphy->sband_2g.sband;
 351
 352	if (!sband->channels)
 353		return -EINVAL;
 354
 355	if ((rxd0 & csum_mask) == csum_mask &&
 356	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
 357		skb->ip_summed = CHECKSUM_UNNECESSARY;
 358
 359	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
 360		status->flag |= RX_FLAG_FAILED_FCS_CRC;
 361
 362	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
 363		status->flag |= RX_FLAG_MMIC_ERROR;
 364
 365	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
 366	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
 367		status->flag |= RX_FLAG_DECRYPTED;
 368		status->flag |= RX_FLAG_IV_STRIPPED;
 369		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
 370	}
 371
 372	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
 373
 374	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
 375		return -EINVAL;
 376
 377	rxd += 6;
 378	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
 379		u32 v0 = le32_to_cpu(rxd[0]);
 380		u32 v2 = le32_to_cpu(rxd[2]);
 381
 382		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
 383		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
 384		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
 385
 386		rxd += 4;
 387		if ((u8 *)rxd - skb->data >= skb->len)
 388			return -EINVAL;
 389	}
 390
 391	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
 392		u8 *data = (u8 *)rxd;
 393
 394		if (status->flag & RX_FLAG_DECRYPTED) {
 395			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
 396			case MT_CIPHER_AES_CCMP:
 397			case MT_CIPHER_CCMP_CCX:
 398			case MT_CIPHER_CCMP_256:
 399				insert_ccmp_hdr =
 400					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
 401				fallthrough;
 402			case MT_CIPHER_TKIP:
 403			case MT_CIPHER_TKIP_NO_MIC:
 404			case MT_CIPHER_GCMP:
 405			case MT_CIPHER_GCMP_256:
 406				status->iv[0] = data[5];
 407				status->iv[1] = data[4];
 408				status->iv[2] = data[3];
 409				status->iv[3] = data[2];
 410				status->iv[4] = data[1];
 411				status->iv[5] = data[0];
 412				break;
 413			default:
 414				break;
 415			}
 416		}
 417		rxd += 4;
 418		if ((u8 *)rxd - skb->data >= skb->len)
 419			return -EINVAL;
 420	}
 421
 422	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
 423		status->timestamp = le32_to_cpu(rxd[0]);
 424		status->flag |= RX_FLAG_MACTIME_START;
 425
 426		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
 427			status->flag |= RX_FLAG_AMPDU_DETAILS;
 428
 429			/* all subframes of an A-MPDU have the same timestamp */
 430			if (phy->rx_ampdu_ts != status->timestamp) {
 431				if (!++phy->ampdu_ref)
 432					phy->ampdu_ref++;
 433			}
 434			phy->rx_ampdu_ts = status->timestamp;
 435
 436			status->ampdu_ref = phy->ampdu_ref;
 437		}
 438
 439		rxd += 2;
 440		if ((u8 *)rxd - skb->data >= skb->len)
 441			return -EINVAL;
 442	}
 443
 444	/* RXD Group 3 - P-RXV */
 445	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
 446		u32 v0, v1;
 447		int ret;
 448
 449		rxv = rxd;
 450		rxd += 2;
 451		if ((u8 *)rxd - skb->data >= skb->len)
 452			return -EINVAL;
 453
 454		v0 = le32_to_cpu(rxv[0]);
 455		v1 = le32_to_cpu(rxv[1]);
 456
 457		if (v0 & MT_PRXV_HT_AD_CODE)
 458			status->enc_flags |= RX_ENC_FLAG_LDPC;
 459
 460		status->chains = mphy->antenna_mask;
 461		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
 462		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
 463		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
 464		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
 465
 466		/* RXD Group 5 - C-RXV */
 467		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
 468			rxd += 18;
 469			if ((u8 *)rxd - skb->data >= skb->len)
 470				return -EINVAL;
 471		}
 472
 473		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
 474			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
 475							    sband, rxv, &mode);
 476			if (ret < 0)
 477				return ret;
 478		}
 479	}
 480
 481	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
 482	status->amsdu = !!amsdu_info;
 483	if (status->amsdu) {
 484		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
 485		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
 486	}
 487
 488	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
 489	if (hdr_trans && ieee80211_has_morefrags(fc)) {
 490		struct ieee80211_vif *vif;
 491		int err;
 492
 493		if (!msta || !msta->vif)
 494			return -EINVAL;
 495
 496		vif = container_of((void *)msta->vif, struct ieee80211_vif,
 497				   drv_priv);
 498		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
 499		if (err)
 500			return err;
 501
 502		hdr_trans = false;
 503	} else {
 504		int pad_start = 0;
 505
 506		skb_pull(skb, hdr_gap);
 507		if (!hdr_trans && status->amsdu) {
 508			pad_start = ieee80211_get_hdrlen_from_skb(skb);
 509		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
 510			/*
 511			 * When header translation failure is indicated,
 512			 * the hardware will insert an extra 2-byte field
 513			 * containing the data length after the protocol
 514			 * type field. This happens either when the LLC-SNAP
 515			 * pattern did not match, or if a VLAN header was
 516			 * detected.
 517			 */
 518			pad_start = 12;
 519			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
 520				pad_start += 4;
 521			else
 522				pad_start = 0;
 523		}
 524
 525		if (pad_start) {
 526			memmove(skb->data + 2, skb->data, pad_start);
 527			skb_pull(skb, 2);
 528		}
 529	}
 530
 531	if (!hdr_trans) {
 532		struct ieee80211_hdr *hdr;
 533
 534		if (insert_ccmp_hdr) {
 535			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
 536
 537			mt76_insert_ccmp_hdr(skb, key_id);
 538		}
 539
 540		hdr = mt76_skb_get_hdr(skb);
 541		fc = hdr->frame_control;
 542		if (ieee80211_is_data_qos(fc)) {
 543			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
 544			qos_ctl = *ieee80211_get_qos_ctl(hdr);
 545		}
 546	} else {
 547		status->flag |= RX_FLAG_8023;
 548		mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
 549				     *info);
 550	}
 551
 552	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
 553		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
 554
 555	if (!status->wcid || !ieee80211_is_data_qos(fc))
 556		return 0;
 557
 558	status->aggr = unicast &&
 559		       !ieee80211_is_qos_nullfunc(fc);
 560	status->qos_ctl = qos_ctl;
 561	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
 562
 563	return 0;
 564}
 565
 566static void
 567mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 568{
 569#ifdef CONFIG_NL80211_TESTMODE
 570	struct mt7915_phy *phy = &dev->phy;
 571	__le32 *rxd = (__le32 *)skb->data;
 572	__le32 *rxv_hdr = rxd + 2;
 573	__le32 *rxv = rxd + 4;
 574	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
 575	u8 band_idx;
 576	s32 foe;
 577	u8 snr;
 578	int i;
 579
 580	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
 581	if (band_idx && !phy->mt76->band_idx) {
 582		phy = mt7915_ext_phy(dev);
 583		if (!phy)
 584			goto out;
 585	}
 586
 587	rcpi = le32_to_cpu(rxv[6]);
 588	ib_rssi = le32_to_cpu(rxv[7]);
 589	wb_rssi = le32_to_cpu(rxv[8]) >> 5;
 590
 591	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
 592		if (i == 3)
 593			wb_rssi = le32_to_cpu(rxv[9]);
 594
 595		phy->test.last_rcpi[i] = rcpi & 0xff;
 596		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
 597		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
 598	}
 599
 600	v20 = le32_to_cpu(rxv[20]);
 601	v21 = le32_to_cpu(rxv[21]);
 602
 603	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
 604	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);
 605
 606	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
 607
 608	phy->test.last_freq_offset = foe;
 609	phy->test.last_snr = snr;
 610out:
 611#endif
 612	dev_kfree_skb(skb);
 613}
 614
 615static void
 616mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
 617			 struct sk_buff *skb)
 618{
 619#ifdef CONFIG_NL80211_TESTMODE
 620	struct mt76_testmode_data *td = &phy->mt76->test;
 621	const struct ieee80211_rate *r;
 622	u8 bw, mode, nss = td->tx_rate_nss;
 623	u8 rate_idx = td->tx_rate_idx;
 624	u16 rateval = 0;
 625	u32 val;
 626	bool cck = false;
 627	int band;
 628
 629	if (skb != phy->mt76->test.tx_skb)
 630		return;
 631
 632	switch (td->tx_rate_mode) {
 633	case MT76_TM_TX_MODE_HT:
 634		nss = 1 + (rate_idx >> 3);
 635		mode = MT_PHY_TYPE_HT;
 636		break;
 637	case MT76_TM_TX_MODE_VHT:
 638		mode = MT_PHY_TYPE_VHT;
 639		break;
 640	case MT76_TM_TX_MODE_HE_SU:
 641		mode = MT_PHY_TYPE_HE_SU;
 642		break;
 643	case MT76_TM_TX_MODE_HE_EXT_SU:
 644		mode = MT_PHY_TYPE_HE_EXT_SU;
 645		break;
 646	case MT76_TM_TX_MODE_HE_TB:
 647		mode = MT_PHY_TYPE_HE_TB;
 648		break;
 649	case MT76_TM_TX_MODE_HE_MU:
 650		mode = MT_PHY_TYPE_HE_MU;
 651		break;
 652	case MT76_TM_TX_MODE_CCK:
 653		cck = true;
 654		fallthrough;
 655	case MT76_TM_TX_MODE_OFDM:
 656		band = phy->mt76->chandef.chan->band;
 657		if (band == NL80211_BAND_2GHZ && !cck)
 658			rate_idx += 4;
 659
 660		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
 661		val = cck ? r->hw_value_short : r->hw_value;
 662
 663		mode = val >> 8;
 664		rate_idx = val & 0xff;
 665		break;
 666	default:
 667		mode = MT_PHY_TYPE_OFDM;
 668		break;
 669	}
 670
 671	switch (phy->mt76->chandef.width) {
 672	case NL80211_CHAN_WIDTH_40:
 673		bw = 1;
 674		break;
 675	case NL80211_CHAN_WIDTH_80:
 676		bw = 2;
 677		break;
 678	case NL80211_CHAN_WIDTH_80P80:
 679	case NL80211_CHAN_WIDTH_160:
 680		bw = 3;
 681		break;
 682	default:
 683		bw = 0;
 684		break;
 685	}
 686
 687	if (td->tx_rate_stbc && nss == 1) {
 688		nss++;
 689		rateval |= MT_TX_RATE_STBC;
 690	}
 691
 692	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
 693		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
 694		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);
 695
 696	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
 697
 698	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
 699	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
 700		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
 701
 702	val = MT_TXD6_FIXED_BW |
 703	      FIELD_PREP(MT_TXD6_BW, bw) |
 704	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
 705	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);
 706
 707	/* for HE_SU/HE_EXT_SU PPDU
 708	 * - 1x, 2x, 4x LTF + 0.8us GI
 709	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 710	 * for HE_MU PPDU
 711	 * - 2x, 4x LTF + 0.8us GI
 712	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 713	 * for HE_TB PPDU
 714	 * - 1x, 2x LTF + 1.6us GI
 715	 * - 4x LTF + 3.2us GI
 716	 */
 717	if (mode >= MT_PHY_TYPE_HE_SU)
 718		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
 719
 720	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
 721		val |= MT_TXD6_LDPC;
 722
 723	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
 724	txwi[6] |= cpu_to_le32(val);
 725	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
 726					  phy->test.spe_idx));
 727#endif
 728}
 729
 730void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
 731			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
 732			   struct ieee80211_key_conf *key,
 733			   enum mt76_txq_id qid, u32 changed)
 734{
 735	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 736	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
 737	struct mt76_phy *mphy = &dev->phy;
 738
 739	if (phy_idx && dev->phys[MT_BAND1])
 740		mphy = dev->phys[MT_BAND1];
 741
 742	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
 743
 744	if (mt76_testmode_enabled(mphy))
 745		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
 746}
 747
 748int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 749			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
 750			  struct ieee80211_sta *sta,
 751			  struct mt76_tx_info *tx_info)
 752{
 753	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
 754	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
 755	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
 756	struct ieee80211_key_conf *key = info->control.hw_key;
 757	struct ieee80211_vif *vif = info->control.vif;
 758	struct mt76_connac_fw_txp *txp;
 759	struct mt76_txwi_cache *t;
 760	int id, i, nbuf = tx_info->nbuf - 1;
 761	u8 *txwi = (u8 *)txwi_ptr;
 762	int pid;
 763
 764	if (unlikely(tx_info->skb->len <= ETH_HLEN))
 765		return -EINVAL;
 766
 767	if (!wcid)
 768		wcid = &dev->mt76.global_wcid;
 769
 770	if (sta) {
 771		struct mt7915_sta *msta;
 772
 773		msta = (struct mt7915_sta *)sta->drv_priv;
 774
 775		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
 776			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 777			msta->jiffies = jiffies;
 778		}
 779	}
 780
 781	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
 782	t->skb = tx_info->skb;
 783
 784	id = mt76_token_consume(mdev, &t);
 785	if (id < 0)
 786		return id;
 787
 788	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 789	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
 790			      qid, 0);
 791
 792	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
 793	for (i = 0; i < nbuf; i++) {
 794		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
 795		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
 796	}
 797	txp->nbuf = nbuf;
 798
 799	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
 800
 801	if (!key)
 802		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
 803
 804	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 805	    ieee80211_is_mgmt(hdr->frame_control))
 806		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
 807
 808	if (vif) {
 809		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 810
 811		txp->bss_idx = mvif->mt76.idx;
 812	}
 813
 814	txp->token = cpu_to_le16(id);
 815	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
 816		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
 817	else
 818		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
 819	tx_info->skb = NULL;
 820
 821	/* pass partial skb header to fw */
 822	tx_info->buf[1].len = MT_CT_PARSE_LEN;
 823	tx_info->buf[1].skip_unmap = true;
 824	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
 825
 826	return 0;
 827}
 828
 829u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
 830{
 831	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
 832	__le32 *txwi = ptr;
 833	u32 val;
 834
 835	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
 836
 837	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
 838	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
 839	txwi[0] = cpu_to_le32(val);
 840
 841	val = MT_TXD1_LONG_FORMAT |
 842	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
 843	txwi[1] = cpu_to_le32(val);
 844
 845	txp->token = cpu_to_le16(token_id);
 846	txp->nbuf = 1;
 847	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
 848
 849	return MT_TXD_SIZE + sizeof(*txp);
 850}
 851
 852static void
 853mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
 854{
 855	struct mt76_dev *mdev = &dev->mt76;
 856	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
 857
 858	/* clean DMA queues and unmap buffers first */
 859	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
 860	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 861	if (mphy_ext) {
 862		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
 863		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
 864	}
 865}
 866
 867static void
 868mt7915_mac_tx_free_done(struct mt7915_dev *dev,
 869			struct list_head *free_list, bool wake)
 870{
 871	struct sk_buff *skb, *tmp;
 872
 873	mt7915_mac_sta_poll(dev);
 874
 875	if (wake)
 876		mt76_set_tx_blocked(&dev->mt76, false);
 877
 878	mt76_worker_schedule(&dev->mt76.tx_worker);
 879
 880	list_for_each_entry_safe(skb, tmp, free_list, list) {
 881		skb_list_del_init(skb);
 882		napi_consume_skb(skb, 1);
 883	}
 884}
 885
 886static void
 887mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
 888{
 889	struct mt76_connac_tx_free *free = data;
 890	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
 891	struct mt76_dev *mdev = &dev->mt76;
 892	struct mt76_txwi_cache *txwi;
 893	struct ieee80211_sta *sta = NULL;
 894	struct mt76_wcid *wcid = NULL;
 895	LIST_HEAD(free_list);
 896	void *end = data + len;
 897	bool v3, wake = false;
 898	u16 total, count = 0;
 899	u32 txd = le32_to_cpu(free->txd);
 900	__le32 *cur_info;
 901
 902	mt7915_mac_tx_free_prepare(dev);
 903
 904	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
 905	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
 906
 907	for (cur_info = tx_info; count < total; cur_info++) {
 908		u32 msdu, info;
 909		u8 i;
 910
 911		if (WARN_ON_ONCE((void *)cur_info >= end))
 912			return;
 913
 914		/*
 915		 * 1'b1: new wcid pair.
 916		 * 1'b0: msdu_id with the same 'wcid pair' as above.
 917		 */
 918		info = le32_to_cpu(*cur_info);
 919		if (info & MT_TX_FREE_PAIR) {
 920			struct mt7915_sta *msta;
 921			u16 idx;
 922
 923			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
 924			wcid = rcu_dereference(dev->mt76.wcid[idx]);
 925			sta = wcid_to_sta(wcid);
 926			if (!sta)
 927				continue;
 928
 929			msta = container_of(wcid, struct mt7915_sta, wcid);
 930			spin_lock_bh(&mdev->sta_poll_lock);
 931			if (list_empty(&msta->wcid.poll_list))
 932				list_add_tail(&msta->wcid.poll_list,
 933					      &mdev->sta_poll_list);
 934			spin_unlock_bh(&mdev->sta_poll_lock);
 935			continue;
 936		}
 937
 938		if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) {
 939			u32 tx_retries = 0, tx_failed = 0;
 940
 941			if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) {
 942				tx_retries =
 943					FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1;
 944				tx_failed = tx_retries +
 945					!!FIELD_GET(MT_TX_FREE_STAT_V3, info);
 946			} else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) {
 947				tx_retries =
 948					FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
 949				tx_failed = tx_retries +
 950					!!FIELD_GET(MT_TX_FREE_STAT, info);
 951			}
 952			wcid->stats.tx_retries += tx_retries;
 953			wcid->stats.tx_failed += tx_failed;
 954		}
 955
 956		if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3))
 957			continue;
 958
 959		for (i = 0; i < 1 + v3; i++) {
 960			if (v3) {
 961				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
 962				if (msdu == MT_TX_FREE_MSDU_ID_V3)
 963					continue;
 964			} else {
 965				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
 966			}
 967			count++;
 968			txwi = mt76_token_release(mdev, msdu, &wake);
 969			if (!txwi)
 970				continue;
 971
 972			mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
 973		}
 974	}
 975
 976	mt7915_mac_tx_free_done(dev, &free_list, wake);
 977}
 978
 979static void
 980mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
 981{
 982	struct mt76_connac_tx_free *free = data;
 983	__le16 *info = (__le16 *)(data + sizeof(*free));
 984	struct mt76_dev *mdev = &dev->mt76;
 985	void *end = data + len;
 986	LIST_HEAD(free_list);
 987	bool wake = false;
 988	u8 i, count;
 989
 990	mt7915_mac_tx_free_prepare(dev);
 991
 992	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
 993	if (WARN_ON_ONCE((void *)&info[count] > end))
 994		return;
 995
 996	for (i = 0; i < count; i++) {
 997		struct mt76_txwi_cache *txwi;
 998		u16 msdu = le16_to_cpu(info[i]);
 999
1000		txwi = mt76_token_release(mdev, msdu, &wake);
1001		if (!txwi)
1002			continue;
1003
1004		mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
1005	}
1006
1007	mt7915_mac_tx_free_done(dev, &free_list, wake);
1008}
1009
1010static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
1011{
1012	struct mt7915_sta *msta = NULL;
1013	struct mt76_wcid *wcid;
1014	__le32 *txs_data = data;
1015	u16 wcidx;
1016	u8 pid;
1017
1018	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1019	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1020
1021	if (pid < MT_PACKET_ID_WED)
1022		return;
1023
1024	if (wcidx >= mt7915_wtbl_size(dev))
1025		return;
1026
1027	rcu_read_lock();
1028
1029	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1030	if (!wcid)
1031		goto out;
1032
1033	msta = container_of(wcid, struct mt7915_sta, wcid);
1034
1035	if (pid == MT_PACKET_ID_WED)
1036		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
1037	else
1038		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
1039
1040	if (!wcid->sta)
1041		goto out;
1042
1043	spin_lock_bh(&dev->mt76.sta_poll_lock);
1044	if (list_empty(&msta->wcid.poll_list))
1045		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
1046	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1047
1048out:
1049	rcu_read_unlock();
1050}
1051
1052bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
1053{
1054	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1055	__le32 *rxd = (__le32 *)data;
1056	__le32 *end = (__le32 *)&rxd[len / 4];
1057	enum rx_pkt_type type;
1058
1059	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1060
1061	switch (type) {
1062	case PKT_TYPE_TXRX_NOTIFY:
1063		mt7915_mac_tx_free(dev, data, len);
1064		return false;
1065	case PKT_TYPE_TXRX_NOTIFY_V0:
1066		mt7915_mac_tx_free_v0(dev, data, len);
1067		return false;
1068	case PKT_TYPE_TXS:
1069		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1070			mt7915_mac_add_txs(dev, rxd);
1071		return false;
1072	case PKT_TYPE_RX_FW_MONITOR:
1073		mt7915_debugfs_rx_fw_monitor(dev, data, len);
1074		return false;
1075	default:
1076		return true;
1077	}
1078}
1079
1080void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1081			 struct sk_buff *skb, u32 *info)
1082{
1083	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1084	__le32 *rxd = (__le32 *)skb->data;
1085	__le32 *end = (__le32 *)&skb->data[skb->len];
1086	enum rx_pkt_type type;
1087
1088	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1089
1090	switch (type) {
1091	case PKT_TYPE_TXRX_NOTIFY:
1092		mt7915_mac_tx_free(dev, skb->data, skb->len);
1093		napi_consume_skb(skb, 1);
1094		break;
1095	case PKT_TYPE_TXRX_NOTIFY_V0:
1096		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
1097		napi_consume_skb(skb, 1);
1098		break;
1099	case PKT_TYPE_RX_EVENT:
1100		mt7915_mcu_rx_event(dev, skb);
1101		break;
1102	case PKT_TYPE_TXRXV:
1103		mt7915_mac_fill_rx_vector(dev, skb);
1104		break;
1105	case PKT_TYPE_TXS:
1106		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1107			mt7915_mac_add_txs(dev, rxd);
1108		dev_kfree_skb(skb);
1109		break;
1110	case PKT_TYPE_RX_FW_MONITOR:
1111		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1112		dev_kfree_skb(skb);
1113		break;
1114	case PKT_TYPE_NORMAL:
1115		if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
1116			mt76_rx(&dev->mt76, q, skb);
1117			return;
1118		}
1119		fallthrough;
1120	default:
1121		dev_kfree_skb(skb);
1122		break;
1123	}
1124}
1125
1126void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
1127{
1128	struct mt7915_dev *dev = phy->dev;
1129	u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);
1130
1131	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
1132	mt76_set(dev, reg, BIT(11) | BIT(9));
1133}
1134
1135void mt7915_mac_reset_counters(struct mt7915_phy *phy)
1136{
1137	struct mt7915_dev *dev = phy->dev;
1138	int i;
1139
1140	for (i = 0; i < 4; i++) {
1141		mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
1142		mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
1143	}
1144
1145	phy->mt76->survey_time = ktime_get_boottime();
1146	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
1147
1148	/* reset airtime counters */
1149	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
1150		 MT_WF_RMAC_MIB_RXTIME_CLR);
1151
1152	mt7915_mcu_get_chan_mib_info(phy, true);
1153}
1154
1155void mt7915_mac_set_timing(struct mt7915_phy *phy)
1156{
1157	s16 coverage_class = phy->coverage_class;
1158	struct mt7915_dev *dev = phy->dev;
1159	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
1160	u32 val, reg_offset;
1161	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1162		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1163	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1164		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1165	u8 band = phy->mt76->band_idx;
1166	int eifs_ofdm = 360, sifs = 10, offset;
1167	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
1168
1169	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1170		return;
1171
1172	if (ext_phy)
1173		coverage_class = max_t(s16, dev->phy.coverage_class,
1174				       ext_phy->coverage_class);
1175
1176	mt76_set(dev, MT_ARB_SCR(band),
1177		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1178	udelay(1);
1179
1180	offset = 3 * coverage_class;
1181	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1182		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1183
1184	if (!is_mt7915(&dev->mt76)) {
1185		if (!a_band) {
1186			mt76_wr(dev, MT_TMAC_ICR1(band),
1187				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
1188			eifs_ofdm = 78;
1189		} else {
1190			eifs_ofdm = 84;
1191		}
1192	} else if (a_band) {
1193		sifs = 16;
1194	}
1195
1196	mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
1197	mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
1198	mt76_wr(dev, MT_TMAC_ICR0(band),
1199		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
1200		FIELD_PREP(MT_IFS_RIFS, 2) |
1201		FIELD_PREP(MT_IFS_SIFS, sifs) |
1202		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1203
1204	if (phy->slottime < 20 || a_band)
1205		val = MT7915_CFEND_RATE_DEFAULT;
1206	else
1207		val = MT7915_CFEND_RATE_11B;
1208
1209	mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
1210	mt76_clear(dev, MT_ARB_SCR(band),
1211		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1212}
1213
1214void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
1215{
1216	u32 reg;
1217
1218	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
1219				      MT_WF_PHY_RXTD12_MT7916(band);
1220	mt76_set(dev, reg,
1221		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
1222		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
1223
1224	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
1225				      MT_WF_PHY_RX_CTRL1_MT7916(band);
1226	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
1227}
1228
1229static u8
1230mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1231{
1232	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1233	struct mt7915_dev *dev = phy->dev;
1234	u32 val, sum = 0, n = 0;
1235	int nss, i;
1236
1237	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1238		u32 reg = is_mt7915(&dev->mt76) ?
1239			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
1240			MT_WF_IRPI_NSS_MT7916(idx, nss);
1241
1242		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1243			val = mt76_rr(dev, reg);
1244			sum += val * nf_power[i];
1245			n += val;
1246		}
1247	}
1248
1249	if (!n)
1250		return 0;
1251
1252	return sum / n;
1253}
1254
1255void mt7915_update_channel(struct mt76_phy *mphy)
1256{
1257	struct mt7915_phy *phy = mphy->priv;
1258	struct mt76_channel_state *state = mphy->chan_state;
1259	int nf;
1260
1261	mt7915_mcu_get_chan_mib_info(phy, false);
1262
1263	nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
1264	if (!phy->noise)
1265		phy->noise = nf << 4;
1266	else if (nf)
1267		phy->noise += nf - (phy->noise >> 4);
1268
1269	state->noise = -(phy->noise >> 4);
1270}
1271
1272static bool
1273mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1274{
1275	bool ret;
1276
1277	ret = wait_event_timeout(dev->reset_wait,
1278				 (READ_ONCE(dev->recovery.state) & state),
1279				 MT7915_RESET_TIMEOUT);
1280
1281	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1282	return ret;
1283}
1284
1285static void
1286mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1287{
1288	struct ieee80211_hw *hw = priv;
1289
1290	switch (vif->type) {
1291	case NL80211_IFTYPE_MESH_POINT:
1292	case NL80211_IFTYPE_ADHOC:
1293	case NL80211_IFTYPE_AP:
1294		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
1295				      BSS_CHANGED_BEACON_ENABLED);
1296		break;
1297	default:
1298		break;
1299	}
1300}
1301
1302static void
1303mt7915_update_beacons(struct mt7915_dev *dev)
1304{
1305	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
1306
1307	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1308		IEEE80211_IFACE_ITER_RESUME_ALL,
1309		mt7915_update_vif_beacon, dev->mt76.hw);
1310
1311	if (!mphy_ext)
1312		return;
1313
1314	ieee80211_iterate_active_interfaces(mphy_ext->hw,
1315		IEEE80211_IFACE_ITER_RESUME_ALL,
1316		mt7915_update_vif_beacon, mphy_ext->hw);
1317}
1318
1319static int
1320mt7915_mac_restart(struct mt7915_dev *dev)
1321{
1322	struct mt7915_phy *phy2;
1323	struct mt76_phy *ext_phy;
1324	struct mt76_dev *mdev = &dev->mt76;
1325	int i, ret;
1326
1327	ext_phy = dev->mt76.phys[MT_BAND1];
1328	phy2 = ext_phy ? ext_phy->priv : NULL;
1329
1330	if (dev->hif2) {
1331		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
1332		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1333	}
1334
1335	if (dev_is_pci(mdev->dev)) {
1336		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
1337		if (dev->hif2) {
1338			if (is_mt7915(mdev))
1339				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
1340			else
1341				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0);
1342		}
1343	}
1344
1345	set_bit(MT76_RESET, &dev->mphy.state);
1346	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1347	wake_up(&dev->mt76.mcu.wait);
1348	if (ext_phy)
1349		set_bit(MT76_RESET, &ext_phy->state);
 
 
1350
1351	/* lock/unlock all queues to ensure that no tx is pending */
1352	mt76_txq_schedule_all(&dev->mphy);
1353	if (ext_phy)
1354		mt76_txq_schedule_all(ext_phy);
1355
1356	/* disable all tx/rx napi */
1357	mt76_worker_disable(&dev->mt76.tx_worker);
1358	mt76_for_each_q_rx(mdev, i) {
1359		if (mdev->q_rx[i].ndesc)
1360			napi_disable(&dev->mt76.napi[i]);
1361	}
1362	napi_disable(&dev->mt76.tx_napi);
1363
1364	/* token reinit */
1365	mt76_connac2_tx_token_put(&dev->mt76);
1366	idr_init(&dev->mt76.token);
1367
1368	mt7915_dma_reset(dev, true);
1369
1370	local_bh_disable();
1371	mt76_for_each_q_rx(mdev, i) {
1372		if (mdev->q_rx[i].ndesc) {
1373			napi_enable(&dev->mt76.napi[i]);
1374			napi_schedule(&dev->mt76.napi[i]);
1375		}
1376	}
1377	local_bh_enable();
1378	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1379	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
1380
1381	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
1382	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
1383
1384	if (dev->hif2) {
1385		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
1386		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1387	}
1388	if (dev_is_pci(mdev->dev)) {
1389		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
1390		if (dev->hif2) {
1391			mt76_wr(dev, MT_PCIE_RECOG_ID,
1392				dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
1393			if (is_mt7915(mdev))
1394				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
1395			else
1396				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
1397		}
1398	}
1399
1400	/* load firmware */
1401	ret = mt7915_mcu_init_firmware(dev);
1402	if (ret)
1403		goto out;
1404
1405	/* set the necessary init items */
1406	ret = mt7915_mcu_set_eeprom(dev);
1407	if (ret)
1408		goto out;
1409
1410	mt7915_mac_init(dev);
1411	mt7915_init_txpower(&dev->phy);
1412	mt7915_init_txpower(phy2);
1413	ret = mt7915_txbf_init(dev);
1414
1415	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
1416		ret = mt7915_run(dev->mphy.hw);
1417		if (ret)
1418			goto out;
1419	}
1420
1421	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
1422		ret = mt7915_run(ext_phy->hw);
1423		if (ret)
1424			goto out;
1425	}
1426
1427out:
1428	/* reset done */
1429	clear_bit(MT76_RESET, &dev->mphy.state);
1430	if (phy2)
1431		clear_bit(MT76_RESET, &phy2->mt76->state);
1432
1433	local_bh_disable();
1434	napi_enable(&dev->mt76.tx_napi);
1435	napi_schedule(&dev->mt76.tx_napi);
1436	local_bh_enable();
1437
1438	mt76_worker_enable(&dev->mt76.tx_worker);
1439
1440	return ret;
1441}
1442
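/*
 * Full chip recovery: stop both phys, retry mt7915_mac_restart() up to
 * ten times, then drop all software station/vif state and let mac80211
 * reconfigure everything through ieee80211_restart_hw().
 */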
1443static void
1444mt7915_mac_full_reset(struct mt7915_dev *dev)
1445{
1446	struct mt76_phy *ext_phy;
1447	struct mt7915_phy *phy2;
1448	int i;
1449
1450	ext_phy = dev->mt76.phys[MT_BAND1];
1451	phy2 = ext_phy ? ext_phy->priv : NULL;
1452
1453	dev->recovery.hw_full_reset = true;
1454
1455	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1456	wake_up(&dev->mt76.mcu.wait);
1457	ieee80211_stop_queues(mt76_hw(dev));
1458	if (ext_phy)
1459		ieee80211_stop_queues(ext_phy->hw);
1460
1461	cancel_delayed_work_sync(&dev->mphy.mac_work);
1462	if (ext_phy)
1463		cancel_delayed_work_sync(&ext_phy->mac_work);
1464
1465	mutex_lock(&dev->mt76.mutex);
1466	for (i = 0; i < 10; i++) {
1467		if (!mt7915_mac_restart(dev))
1468			break;
1469	}
1470
1471	if (i == 10)
1472		dev_err(dev->mt76.dev, "chip full reset failed\n");
1473
1474	spin_lock_bh(&dev->mt76.sta_poll_lock);
1475	while (!list_empty(&dev->mt76.sta_poll_list))
1476		list_del_init(dev->mt76.sta_poll_list.next);
1477	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1478
1479	memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
1480	dev->mt76.vif_mask = 0;
1481	dev->phy.omac_mask = 0;
1482	if (phy2)
1483		phy2->omac_mask = 0;
1484
1485	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
1486	dev->mt76.global_wcid.idx = i;
1487	dev->recovery.hw_full_reset = false;
1488
1489	mutex_unlock(&dev->mt76.mutex);
1490
1491	ieee80211_restart_hw(mt76_hw(dev));
1492	if (ext_phy)
1493		ieee80211_restart_hw(ext_phy->hw);
1494}
1495
1496/* system error recovery */
1497void mt7915_mac_reset_work(struct work_struct *work)
1498{
1499	struct mt7915_phy *phy2;
1500	struct mt76_phy *ext_phy;
1501	struct mt7915_dev *dev;
1502	int i;
1503
1504	dev = container_of(work, struct mt7915_dev, reset_work);
1505	ext_phy = dev->mt76.phys[MT_BAND1];
1506	phy2 = ext_phy ? ext_phy->priv : NULL;
1507
1508	/* chip full reset */
1509	if (dev->recovery.restart) {
1510		/* disable WA/WM WDT */
1511		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
1512			   MT_MCU_CMD_WDT_MASK);
1513
1514		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
1515			dev->recovery.wa_reset_count++;
1516		else
1517			dev->recovery.wm_reset_count++;
1518
1519		mt7915_mac_full_reset(dev);
1520
1521		/* enable mcu irq */
1522		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
1523		mt7915_irq_disable(dev, 0);
1524
1525		/* enable WA/WM WDT */
1526		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
1527
1528		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
1529		dev->recovery.restart = false;
1530		return;
1531	}
1532
1533	/* chip partial reset */
1534	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
1535		return;
1536
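	/*
	 * Partial (DMA) recovery handshake with the MCU: quiesce tx/rx,
	 * report DMA_STOPPED, reset the host DMA once the MCU signals
	 * RESET_DONE, then report DMA_INIT, wait for RECOVERY_DONE and
	 * finally report RESET_DONE and wait for the firmware to reach
	 * NORMAL_STATE before restarting traffic.
	 */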
1537	ieee80211_stop_queues(mt76_hw(dev));
1538	if (ext_phy)
1539		ieee80211_stop_queues(ext_phy->hw);
1540
1541	set_bit(MT76_RESET, &dev->mphy.state);
1542	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1543	wake_up(&dev->mt76.mcu.wait);
1544	cancel_delayed_work_sync(&dev->mphy.mac_work);
1545	if (phy2) {
1546		set_bit(MT76_RESET, &phy2->mt76->state);
1547		cancel_delayed_work_sync(&phy2->mt76->mac_work);
1548	}
1549
1550	mutex_lock(&dev->mt76.mutex);
1551
1552	mt76_worker_disable(&dev->mt76.tx_worker);
1553	mt76_for_each_q_rx(&dev->mt76, i)
1554		napi_disable(&dev->mt76.napi[i]);
1555	napi_disable(&dev->mt76.tx_napi);
1556
1558	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
1559		mtk_wed_device_stop(&dev->mt76.mmio.wed);
1560
1561	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
1562
1563	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
1564		mt7915_dma_reset(dev, false);
1565
1566		mt76_connac2_tx_token_put(&dev->mt76);
1567		idr_init(&dev->mt76.token);
1568
1569		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
1570		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
1571	}
1572
1573	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
1574	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
1575
1576	/* enable DMA Tx/Rx and interrupt */
1577	mt7915_dma_start(dev, false, false);
1578
1579	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1580	clear_bit(MT76_RESET, &dev->mphy.state);
1581	if (phy2)
1582		clear_bit(MT76_RESET, &phy2->mt76->state);
1583
1584	local_bh_disable();
1585	mt76_for_each_q_rx(&dev->mt76, i) {
1586		napi_enable(&dev->mt76.napi[i]);
1587		napi_schedule(&dev->mt76.napi[i]);
1588	}
1589	local_bh_enable();
1590
1591	tasklet_schedule(&dev->mt76.irq_tasklet);
1592
1593	mt76_worker_enable(&dev->mt76.tx_worker);
1594
1595	local_bh_disable();
1596	napi_enable(&dev->mt76.tx_napi);
1597	napi_schedule(&dev->mt76.tx_napi);
1598	local_bh_enable();
1599
1600	ieee80211_wake_queues(mt76_hw(dev));
1601	if (ext_phy)
1602		ieee80211_wake_queues(ext_phy->hw);
1603
1604	mutex_unlock(&dev->mt76.mutex);
1605
1606	mt7915_update_beacons(dev);
1607
1608	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1609				     MT7915_WATCHDOG_TIME);
1610	if (phy2)
1611		ieee80211_queue_delayed_work(ext_phy->hw,
1612					     &phy2->mt76->mac_work,
1613					     MT7915_WATCHDOG_TIME);
1614}
1615
1616/* firmware coredump */
1617void mt7915_mac_dump_work(struct work_struct *work)
1618{
1619	const struct mt7915_mem_region *mem_region;
1620	struct mt7915_crash_data *crash_data;
1621	struct mt7915_dev *dev;
1622	struct mt7915_mem_hdr *hdr;
1623	size_t buf_len;
1624	int i;
1625	u32 num;
1626	u8 *buf;
1627
1628	dev = container_of(work, struct mt7915_dev, dump_work);
1629
1630	mutex_lock(&dev->dump_mutex);
1631
1632	crash_data = mt7915_coredump_new(dev);
1633	if (!crash_data) {
1634		mutex_unlock(&dev->dump_mutex);
1635		goto skip_coredump;
1636	}
1637
1638	mem_region = mt7915_coredump_get_mem_layout(dev, &num);
1639	if (!mem_region || !crash_data->memdump_buf_len) {
1640		mutex_unlock(&dev->dump_mutex);
1641		goto skip_memdump;
1642	}
1643
1644	buf = crash_data->memdump_buf;
1645	buf_len = crash_data->memdump_buf_len;
1646
1647	/* dump each region as a mt7915_mem_hdr (start, len) plus raw content */
1648	memset(buf, 0, buf_len);
1649	for (i = 0; i < num; i++) {
1650		if (mem_region->len > buf_len) {
1651			dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
1652				 mem_region->name,
1653				 (unsigned long)mem_region->len);
1654			break;
1655		}
1656
1657		/* reserve space for the header */
1658		hdr = (void *)buf;
1659		buf += sizeof(*hdr);
1660		buf_len -= sizeof(*hdr);
1661
1662		mt7915_memcpy_fromio(dev, buf, mem_region->start,
1663				     mem_region->len);
1664
1665		hdr->start = mem_region->start;
1666		hdr->len = mem_region->len;
1667
1668		if (!mem_region->len)
1669			/* note: the header remains, just with zero length */
1670			break;
1671
1672		buf += mem_region->len;
1673		buf_len -= mem_region->len;
1674
1675		mem_region++;
1676	}
1677
1678	mutex_unlock(&dev->dump_mutex);
1679
1680skip_memdump:
1681	mt7915_coredump_submit(dev);
1682skip_coredump:
1683	queue_work(dev->mt76.wq, &dev->reset_work);
1684}
1685
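/*
 * SER entry point: WM/WA watchdog exceptions trigger a firmware
 * coredump followed by a full restart, while other recovery events
 * schedule the partial reset work directly.
 */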
1686void mt7915_reset(struct mt7915_dev *dev)
1687{
1688	if (!dev->recovery.hw_init_done)
1689		return;
1690
1691	if (dev->recovery.hw_full_reset)
1692		return;
1693
1694	/* wm/wa exception: do full recovery */
1695	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
1696		dev->recovery.restart = true;
1697		dev_info(dev->mt76.dev,
1698			 "%s indicated firmware crash, attempting recovery\n",
1699			 wiphy_name(dev->mt76.hw->wiphy));
1700
1701		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
1702		queue_work(dev->mt76.wq, &dev->dump_work);
1703		return;
1704	}
1705
1706	if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
1707		set_bit(MT76_MCU_RESET, &dev->mphy.state);
1708		wake_up(&dev->mt76.mcu.wait);
1709	}
1710
1711	queue_work(dev->mt76.wq, &dev->reset_work);
1712	wake_up(&dev->reset_wait);
1713}
1714
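/*
 * Accumulate the hardware MIB counters into the software mib stats.
 * The counter layout differs between mt7915 and the newer mt7916/mt7986
 * generation, hence the is_mt7915() special-casing for several fields
 * and for the aggregation/beamforming report registers.
 */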
1715void mt7915_mac_update_stats(struct mt7915_phy *phy)
1716{
1717	struct mt76_mib_stats *mib = &phy->mib;
1718	struct mt7915_dev *dev = phy->dev;
1719	int i, aggr0 = 0, aggr1, cnt;
1720	u8 band = phy->mt76->band_idx;
1721	u32 val;
1722
1723	cnt = mt76_rr(dev, MT_MIB_SDR3(band));
1724	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
1725		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
1726		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
1727
1728	cnt = mt76_rr(dev, MT_MIB_SDR4(band));
1729	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
1730
1731	cnt = mt76_rr(dev, MT_MIB_SDR5(band));
1732	mib->rx_mpdu_cnt += cnt;
1733
1734	cnt = mt76_rr(dev, MT_MIB_SDR6(band));
1735	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
1736
1737	cnt = mt76_rr(dev, MT_MIB_SDR7(band));
1738	mib->rx_vector_mismatch_cnt +=
1739		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
1740
1741	cnt = mt76_rr(dev, MT_MIB_SDR8(band));
1742	mib->rx_delimiter_fail_cnt +=
1743		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
1744
1745	cnt = mt76_rr(dev, MT_MIB_SDR10(band));
1746	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
1747		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
1748		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
1749
1750	cnt = mt76_rr(dev, MT_MIB_SDR11(band));
1751	mib->rx_len_mismatch_cnt +=
1752		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
1753
1754	cnt = mt76_rr(dev, MT_MIB_SDR12(band));
1755	mib->tx_ampdu_cnt += cnt;
1756
1757	cnt = mt76_rr(dev, MT_MIB_SDR13(band));
1758	mib->tx_stop_q_empty_cnt +=
1759		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
1760
1761	cnt = mt76_rr(dev, MT_MIB_SDR14(band));
1762	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
1763		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
1764		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
1765
1766	cnt = mt76_rr(dev, MT_MIB_SDR15(band));
1767	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
1768		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
1769		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
1770
1771	cnt = mt76_rr(dev, MT_MIB_SDR16(band));
1772	mib->primary_cca_busy_time +=
1773		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
1774
1775	cnt = mt76_rr(dev, MT_MIB_SDR17(band));
1776	mib->secondary_cca_busy_time +=
1777		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
1778
1779	cnt = mt76_rr(dev, MT_MIB_SDR18(band));
1780	mib->primary_energy_detect_time +=
1781		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
1782
1783	cnt = mt76_rr(dev, MT_MIB_SDR19(band));
1784	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
1785
1786	cnt = mt76_rr(dev, MT_MIB_SDR20(band));
1787	mib->ofdm_mdrdy_time +=
1788		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
1789
1790	cnt = mt76_rr(dev, MT_MIB_SDR21(band));
1791	mib->green_mdrdy_time +=
1792		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
1793
1794	cnt = mt76_rr(dev, MT_MIB_SDR22(band));
1795	mib->rx_ampdu_cnt += cnt;
1796
1797	cnt = mt76_rr(dev, MT_MIB_SDR23(band));
1798	mib->rx_ampdu_bytes_cnt += cnt;
1799
1800	cnt = mt76_rr(dev, MT_MIB_SDR24(band));
1801	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
1802		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
1803		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
1804
1805	cnt = mt76_rr(dev, MT_MIB_SDR25(band));
1806	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
1807
1808	cnt = mt76_rr(dev, MT_MIB_SDR27(band));
1809	mib->tx_rwp_fail_cnt +=
1810		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
1811
1812	cnt = mt76_rr(dev, MT_MIB_SDR28(band));
1813	mib->tx_rwp_need_cnt +=
1814		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
1815
1816	cnt = mt76_rr(dev, MT_MIB_SDR29(band));
1817	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
1818		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
1819		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
1820
1821	cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
1822	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
1823		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
1824		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
1825
1826	cnt = mt76_rr(dev, MT_MIB_SDR31(band));
1827	mib->rx_ba_cnt += cnt;
1828
1829	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
1830	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
1831
1832	cnt = mt76_rr(dev, MT_MIB_DR8(band));
1833	mib->tx_mu_mpdu_cnt += cnt;
1834
1835	cnt = mt76_rr(dev, MT_MIB_DR9(band));
1836	mib->tx_mu_acked_mpdu_cnt += cnt;
1837
1838	cnt = mt76_rr(dev, MT_MIB_DR11(band));
1839	mib->tx_su_acked_mpdu_cnt += cnt;
1840
1841	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
1842	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
1843	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
1844	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
1845
1846	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
1847		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
1848		mib->tx_amsdu[i] += cnt;
1849		mib->tx_amsdu_cnt += cnt;
1850	}
1851
1852	if (is_mt7915(&dev->mt76)) {
1853		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
1854			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
1855			mib->ba_miss_cnt +=
1856				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
1857			mib->ack_fail_cnt +=
1858				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
1859
1860			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
1861			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1862			mib->rts_retries_cnt +=
1863				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1864
1865			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1866			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
1867			phy->mt76->aggr_stats[aggr0++] += val >> 16;
1868
1869			val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
1870			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
1871			phy->mt76->aggr_stats[aggr1++] += val >> 16;
1872		}
1873
1874		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1875		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1876
1877		cnt = mt76_rr(dev, MT_MIB_SDR33(band));
1878		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
1879
1880		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
1881		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
1882		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
1883
1884		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
1885		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
1886		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
1887
1888		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
1889		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
1890		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
1891		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
1892		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
1893	} else {
1894		for (i = 0; i < 2; i++) {
1895			/* rts count */
1896			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
1897			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
1898			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
1899
1900			/* rts retry count */
1901			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
1902			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
1903			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
1904
1905			/* ba miss count */
1906			val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
1907			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
1908			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
1909
1910			/* ack fail count */
1911			val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
1912			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
1913			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
1914		}
1915
1916		for (i = 0; i < 8; i++) {
1917			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1918			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
1919			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
1920		}
1921
1922		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1923		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1924		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1925		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1926		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1927
1928		cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
1929		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
1930
1931		cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
1932		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
1933
1934		cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
1935		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1936		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1937		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1938		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1939
1940		cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
1941		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1942		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1943	}
1944}
1945
1946static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1947{
1948	struct mt7915_dev *dev = phy->dev;
1949	u32 trb;
1950
1951	if (!phy->omac_mask)
1952		return;
1953
1954	/* In rare cases, the TRB pointers might get out of sync, which causes
1955	 * RMAC to stop Rx, so check the status periodically to see if the TRB
1956	 * hardware requires minimal recovery.
1957	 */
1958	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));
1959
1960	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
1961	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
1962	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
1963	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
1964	    trb == phy->trb_ts)
1965		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
1966				   phy->mt76->band_idx);
1967
1968	phy->trb_ts = trb;
1969}
1970
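/*
 * Deferred rate-control updates: drain dev->sta_rc_list and push the
 * accumulated rate/SMPS changes to the MCU, dropping the poll lock
 * around the MCU calls since they may sleep.
 */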
1971void mt7915_mac_sta_rc_work(struct work_struct *work)
1972{
1973	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
1974	struct ieee80211_sta *sta;
1975	struct ieee80211_vif *vif;
1976	struct mt7915_sta *msta;
1977	u32 changed;
1978	LIST_HEAD(list);
1979
1980	spin_lock_bh(&dev->mt76.sta_poll_lock);
1981	list_splice_init(&dev->sta_rc_list, &list);
1982
1983	while (!list_empty(&list)) {
1984		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
1985		list_del_init(&msta->rc_list);
1986		changed = msta->changed;
1987		msta->changed = 0;
1988		spin_unlock_bh(&dev->mt76.sta_poll_lock);
1989
1990		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
1991		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
1992
1993		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
1994			       IEEE80211_RC_NSS_CHANGED |
1995			       IEEE80211_RC_BW_CHANGED))
1996			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
1997
1998		if (changed & IEEE80211_RC_SMPS_CHANGED)
1999			mt7915_mcu_add_smps(dev, vif, sta);
2000
2001		spin_lock_bh(&dev->mt76.sta_poll_lock);
2002	}
2003
2004	spin_unlock_bh(&dev->mt76.sta_poll_lock);
2005}
2006
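/*
 * Periodic per-phy watchdog: update the channel survey on every run
 * and, on every fifth run, refresh the MIB stats and perform the TRB
 * severe check.
 */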
2007void mt7915_mac_work(struct work_struct *work)
2008{
2009	struct mt7915_phy *phy;
2010	struct mt76_phy *mphy;
2011
2012	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2013					       mac_work.work);
2014	phy = mphy->priv;
2015
2016	mutex_lock(&mphy->dev->mutex);
2017
2018	mt76_update_survey(mphy);
2019	if (++mphy->mac_work_count == 5) {
2020		mphy->mac_work_count = 0;
2021
2022		mt7915_mac_update_stats(phy);
2023		mt7915_mac_severe_check(phy);
2024
2025		if (phy->dev->muru_debug)
2026			mt7915_mcu_muru_debug_get(phy);
2027	}
2028
2029	mutex_unlock(&mphy->dev->mutex);
2030
2031	mt76_tx_status_check(mphy->dev, false);
2032
2033	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2034				     MT7915_WATCHDOG_TIME);
2035}
2036
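/*
 * Stop the RDD on every chain that was started: rdd_state bit n tracks
 * chain n, where chain 1 is the extra chain used on mt7915 for
 * 160/80+80 MHz channels.
 */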
2037static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
2038{
2039	struct mt7915_dev *dev = phy->dev;
2040
2041	if (phy->rdd_state & BIT(0))
2042		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
2043					MT_RX_SEL0, 0);
2044	if (phy->rdd_state & BIT(1))
2045		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
2046					MT_RX_SEL0, 0);
2047}
2048
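/*
 * Start the RDD on one chain: map the nl80211 DFS region onto the
 * firmware encoding used here (ETSI=0, FCC=1, JP=2), start detection
 * and, on mt7915, also issue RDD_SET_WF_ANT (2 when DBDC is enabled).
 */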
2049static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
2050{
2051	int err, region;
2052
2053	switch (dev->mt76.region) {
2054	case NL80211_DFS_ETSI:
2055		region = 0;
2056		break;
2057	case NL80211_DFS_JP:
2058		region = 2;
2059		break;
2060	case NL80211_DFS_FCC:
2061	default:
2062		region = 1;
2063		break;
2064	}
2065
2066	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
2067				      MT_RX_SEL0, region);
2068	if (err < 0)
2069		return err;
2070
2071	if (is_mt7915(&dev->mt76)) {
2072		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
2073					      0, dev->dbdc_support ? 2 : 0);
2074		if (err < 0)
2075			return err;
2076	}
2077
2078	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
2079				       MT_RX_SEL0, 1);
2080}
2081
2082static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
2083{
2084	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2085	struct mt7915_dev *dev = phy->dev;
2086	int err;
2087
2088	/* start CAC */
2089	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
2090				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2091	if (err < 0)
2092		return err;
2093
2094	err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
2095	if (err < 0)
2096		return err;
2097
2098	phy->rdd_state |= BIT(phy->mt76->band_idx);
2099
2100	if (!is_mt7915(&dev->mt76))
2101		return 0;
2102
2103	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2104	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
2105		err = mt7915_dfs_start_rdd(dev, 1);
2106		if (err < 0)
2107			return err;
2108
2109		phy->rdd_state |= BIT(1);
2110	}
2111
2112	return 0;
2113}
2114
2115static int
2116mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
2117{
2118	const struct mt7915_dfs_radar_spec *radar_specs;
2119	struct mt7915_dev *dev = phy->dev;
2120	int err, i;
2121
2122	switch (dev->mt76.region) {
2123	case NL80211_DFS_FCC:
2124		radar_specs = &fcc_radar_specs;
2125		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
2126		if (err < 0)
2127			return err;
2128		break;
2129	case NL80211_DFS_ETSI:
2130		radar_specs = &etsi_radar_specs;
2131		break;
2132	case NL80211_DFS_JP:
2133		radar_specs = &jp_radar_specs;
2134		break;
2135	default:
2136		return -EINVAL;
2137	}
2138
2139	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2140		err = mt7915_mcu_set_radar_th(dev, i,
2141					      &radar_specs->radar_pattern[i]);
2142		if (err < 0)
2143			return err;
2144	}
2145
2146	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2147}
2148
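/*
 * DFS state machine: move the phy between DISABLED, CAC and ACTIVE
 * based on the current chandef. Entering a DFS channel programs the
 * region-specific radar patterns and starts CAC, RDD_CAC_END promotes
 * CAC to ACTIVE, and leaving DFS reverts to normal rx and stops the
 * detector.
 */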
2149int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
2150{
2151	struct mt7915_dev *dev = phy->dev;
2152	enum mt76_dfs_state dfs_state, prev_state;
2153	int err;
2154
2155	prev_state = phy->mt76->dfs_state;
2156	dfs_state = mt76_phy_dfs_state(phy->mt76);
2157
2158	if (prev_state == dfs_state)
2159		return 0;
2160
2161	if (prev_state == MT_DFS_STATE_UNKNOWN)
2162		mt7915_dfs_stop_radar_detector(phy);
2163
2164	if (dfs_state == MT_DFS_STATE_DISABLED)
2165		goto stop;
2166
2167	if (prev_state <= MT_DFS_STATE_DISABLED) {
2168		err = mt7915_dfs_init_radar_specs(phy);
2169		if (err < 0)
2170			return err;
2171
2172		err = mt7915_dfs_start_radar_detector(phy);
2173		if (err < 0)
2174			return err;
2175
2176		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
2177	}
2178
2179	if (dfs_state == MT_DFS_STATE_CAC)
2180		return 0;
2181
2182	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
2183				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2184	if (err < 0) {
2185		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
2186		return err;
2187	}
2188
2189	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
2190	return 0;
2191
2192stop:
2193	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
2194				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2195	if (err < 0)
2196		return err;
2197
2198	if (is_mt7915(&dev->mt76)) {
2199		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
2200					      phy->mt76->band_idx, 0,
2201					      dev->dbdc_support ? 2 : 0);
2202		if (err < 0)
2203			return err;
2204	}
2205
2206	mt7915_dfs_stop_radar_detector(phy);
2207	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
2208
2209	return 0;
2210}
2211
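/*
 * TWT wake durations are negotiated in units of 256us (see
 * mt7915_mac_check_twt_req()); shifting by 8 converts them to
 * microseconds for TSF arithmetic.
 */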
2212static int
2213mt7915_mac_twt_duration_align(int duration)
2214{
2215	return duration << 8;
2216}
2217
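/*
 * Insert a new TWT flow into the TSF-ordered schedule list at the
 * first gap wide enough for its duration, and return the start TSF
 * assigned to the flow (0 when it becomes the list head).
 */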
2218static u64
2219mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
2220			      struct mt7915_twt_flow *flow)
2221{
2222	struct mt7915_twt_flow *iter, *iter_next;
2223	u32 duration = flow->duration << 8;
2224	u64 start_tsf;
2225
2226	iter = list_first_entry_or_null(&dev->twt_list,
2227					struct mt7915_twt_flow, list);
2228	if (!iter || !iter->sched || iter->start_tsf > duration) {
2229		/* add flow as first entry in the list */
2230		list_add(&flow->list, &dev->twt_list);
2231		return 0;
2232	}
2233
2234	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
2235		start_tsf = iter->start_tsf +
2236			    mt7915_mac_twt_duration_align(iter->duration);
2237		if (list_is_last(&iter->list, &dev->twt_list))
2238			break;
2239
2240		if (!iter_next->sched ||
2241		    iter_next->start_tsf > start_tsf + duration) {
2242			list_add(&flow->list, &iter->list);
2243			goto out;
2244		}
2245	}
2246
2247	/* add flow as last entry in the list */
2248	list_add_tail(&flow->list, &dev->twt_list);
2249out:
2250	return start_tsf;
2251}
2252
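/*
 * Validate a TWT setup request: reject broadcast agreements,
 * wake-duration units other than 256us, explicit (non-implicit)
 * agreements, and wake intervals shorter than the requested duration.
 */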
2253static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2254{
2255	struct ieee80211_twt_params *twt_agrt;
2256	u64 interval, duration;
2257	u16 mantissa;
2258	u8 exp;
2259
2260	/* only individual agreement supported */
2261	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2262		return -EOPNOTSUPP;
2263
2264	/* only 256us unit supported */
2265	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
2266		return -EOPNOTSUPP;
2267
2268	twt_agrt = (struct ieee80211_twt_params *)twt->params;
2269
2270	/* explicit agreement not supported */
2271	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2272		return -EOPNOTSUPP;
2273
2274	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2275			le16_to_cpu(twt_agrt->req_type));
2276	mantissa = le16_to_cpu(twt_agrt->mantissa);
2277	duration = twt_agrt->min_twt_dur << 8;
2278
2279	interval = (u64)mantissa << exp;
2280	if (interval < duration)
2281		return -EOPNOTSUPP;
2282
2283	return 0;
2284}
2285
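/*
 * Check whether the requested parameters match one of the station's
 * existing TWT flows, so that duplicate agreements are not installed.
 */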
2286static bool
2287mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
2288			   struct ieee80211_twt_params *twt_agrt)
2289{
2290	u16 type = le16_to_cpu(twt_agrt->req_type);
2291	u8 exp;
2292	int i;
2293
2294	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
2295	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
2296		struct mt7915_twt_flow *f;
2297
2298		if (!(msta->twt.flowid_mask & BIT(i)))
2299			continue;
2300
2301		f = &msta->twt.flow[i];
2302		if (f->duration == twt_agrt->min_twt_dur &&
2303		    f->mantissa == twt_agrt->mantissa &&
2304		    f->exp == exp &&
2305		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
2306		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
2307		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
2308			return true;
2309	}
2310
2311	return false;
2312}
2313
2314void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
2315			      struct ieee80211_sta *sta,
2316			      struct ieee80211_twt_setup *twt)
2317{
2318	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
2319	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
2320	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
2321	u16 req_type = le16_to_cpu(twt_agrt->req_type);
2322	enum ieee80211_twt_setup_cmd sta_setup_cmd;
2323	struct mt7915_dev *dev = mt7915_hw_dev(hw);
2324	struct mt7915_twt_flow *flow;
2325	int flowid, table_id;
2326	u8 exp;
2327
2328	if (mt7915_mac_check_twt_req(twt))
2329		goto out;
2330
2331	mutex_lock(&dev->mt76.mutex);
2332
2333	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
2334		goto unlock;
2335
2336	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
2337		goto unlock;
2338
2339	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
2340		setup_cmd = TWT_SETUP_CMD_DICTATE;
2341		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
2342		goto unlock;
2343	}
2344
2345	flowid = ffs(~msta->twt.flowid_mask) - 1;
2346	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
2347	twt_agrt->req_type |= le16_encode_bits(flowid,
2348					       IEEE80211_TWT_REQTYPE_FLOWID);
2349
2350	table_id = ffs(~dev->twt.table_mask) - 1;
2351	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
2352	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
2353
2354	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
2355		goto unlock;
2356
2357	flow = &msta->twt.flow[flowid];
2358	memset(flow, 0, sizeof(*flow));
2359	INIT_LIST_HEAD(&flow->list);
2360	flow->wcid = msta->wcid.idx;
2361	flow->table_id = table_id;
2362	flow->id = flowid;
2363	flow->duration = twt_agrt->min_twt_dur;
2364	flow->mantissa = twt_agrt->mantissa;
2365	flow->exp = exp;
2366	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
2367	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
2368	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
2369
2370	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
2371	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
2372		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
2373		u64 flow_tsf, curr_tsf;
2374		u32 rem;
2375
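		/*
		 * Reserve a slot in the schedule list and align the
		 * first service period to the next interval boundary
		 * after the current TSF, relative to that slot.
		 */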
2376		flow->sched = true;
2377		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
2378		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
2379		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
2380		flow_tsf = curr_tsf + interval - rem;
2381		twt_agrt->twt = cpu_to_le64(flow_tsf);
2382	} else {
2383		list_add_tail(&flow->list, &dev->twt_list);
2384	}
2385	flow->tsf = le64_to_cpu(twt_agrt->twt);
2386
2387	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
2388		goto unlock;
2389
2390	setup_cmd = TWT_SETUP_CMD_ACCEPT;
2391	dev->twt.table_mask |= BIT(table_id);
2392	msta->twt.flowid_mask |= BIT(flowid);
2393	dev->twt.n_agrt++;
2394
2395unlock:
2396	mutex_unlock(&dev->mt76.mutex);
2397out:
2398	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
2399	twt_agrt->req_type |=
2400		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
2401	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
2402		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
2403}
2404
2405void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
2406				  struct mt7915_sta *msta,
2407				  u8 flowid)
2408{
2409	struct mt7915_twt_flow *flow;
2410
2411	lockdep_assert_held(&dev->mt76.mutex);
2412
2413	if (flowid >= ARRAY_SIZE(msta->twt.flow))
2414		return;
2415
2416	if (!(msta->twt.flowid_mask & BIT(flowid)))
2417		return;
2418
2419	flow = &msta->twt.flow[flowid];
2420	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
2421				       MCU_TWT_AGRT_DELETE))
2422		return;
2423
2424	list_del_init(&flow->list);
2425	msta->twt.flowid_mask &= ~BIT(flowid);
2426	dev->twt.table_mask &= ~BIT(flow->table_id);
2427	dev->twt.n_agrt--;
2428}