Loading...
Note: File does not exist in v3.1.
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright(c) 2019-2022 Realtek Corporation
3 */
4
5#include "coex.h"
6#include "debug.h"
7#include "mac.h"
8#include "phy.h"
9#include "reg.h"
10#include "rtw8852b.h"
11#include "rtw8852b_rfk.h"
12#include "rtw8852b_rfk_table.h"
13#include "rtw8852b_table.h"
14
/* Version tags and dimensioning constants for the RTW8852B RF-calibration
 * (RFK) routines: RX DCK, IQK, TSSI and DPK.
 */
#define RTW8852B_RXDCK_VER 0x1
#define RTW8852B_IQK_VER 0x2a
#define RTW8852B_IQK_SS 2
#define RTW8852B_RXK_GROUP_NR 4
#define RTW8852B_TSSI_PATH_NR 2
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
#define RTW8852B_DPK_KIP_REG_NUM 2

#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100 /* number of ADC DC samples averaged in _check_addc() */
#define DPK_TXAGC_LOWER 0x2e /* presumably lower TXAGC bound for DPK gain search */
#define DPK_TXAGC_UPPER 0x3f /* presumably upper TXAGC bound for DPK gain search */
#define DPK_TXAGC_INVAL 0xff /* sentinel for an invalid TXAGC value */
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000
32
/* One-shot command IDs for the DPK (digital pre-distortion) engine.
 * Entries prefixed with D_ appear to be the second-stage/derived variants
 * of the corresponding base commands -- TODO confirm against vendor docs.
 */
enum rtw8852b_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};
54
/* States of the DPK AGC adjustment state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};
63
/* IQK one-shot operation types; passed as @ktype to _iqk_one_shot(). */
enum rtw8852b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
80
/* Per-path TSSI register addresses (index 0 = path A, 1 = path B) and
 * per-group RXK gain/attenuation settings.  The _a_* tables are the 5 GHz
 * (A-band) values, the _g_* tables the 2.4 GHz (G-band) values.
 */
static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
/* RX IQK per-group (RTW8852B_RXK_GROUP_NR) front-end settings */
static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};
110
/* BB and RF registers saved before calibration and restored afterwards
 * by _rfk_backup_*_reg()/_rfk_restore_*_reg().
 */
static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
static const u32 rtw8852b_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)
118
/* {addr, mask, value} sequence applied before calibration to set up
 * both paths (0/1) in non-DBCC mode; undone by
 * rtw8852b_restore_nondbcc_path01[] below.
 */
static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x5864, 0x18000000, 0x3},
	{0x7864, 0x18000000, 0x3},
	{0x12b8, 0x40000000, 0x1},
	{0x32b8, 0x40000000, 0x1},
	{0x030c, 0xff000000, 0x13},
	{0x032c, 0xffff0000, 0x0041},
	{0x12b8, 0x10000000, 0x1},
	{0x58c8, 0x01000000, 0x1},
	{0x78c8, 0x01000000, 0x1},
	{0x5864, 0xc0000000, 0x3},
	{0x7864, 0xc0000000, 0x3},
	{0x2008, 0x01ffffff, 0x1ffffff},
	{0x0c1c, 0x00000004, 0x1},
	{0x0700, 0x08000000, 0x1},
	{0x0c70, 0x000003ff, 0x3ff},
	{0x0c60, 0x00000003, 0x3},
	{0x0c6c, 0x00000001, 0x1},
	{0x58ac, 0x08000000, 0x1},
	{0x78ac, 0x08000000, 0x1},
	{0x0c3c, 0x00000200, 0x1},
	{0x2344, 0x80000000, 0x1},
	{0x4490, 0x80000000, 0x1},
	{0x12a0, 0x00007000, 0x7},
	{0x12a0, 0x00008000, 0x1},
	{0x12a0, 0x00070000, 0x3},
	{0x12a0, 0x00080000, 0x1},
	{0x32a0, 0x00070000, 0x3},
	{0x32a0, 0x00080000, 0x1},
	{0x0700, 0x01000000, 0x1},
	{0x0700, 0x06000000, 0x2},
	{0x20fc, 0xffff0000, 0x3333},
};
153
/* {addr, mask, value} sequence that reverts the non-DBCC path 0/1
 * calibration setup done by rtw8852b_set_nondbcc_path01[].
 */
static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x12b8, 0x40000000, 0x0},
	{0x32b8, 0x40000000, 0x0},
	{0x5864, 0xc0000000, 0x0},
	{0x7864, 0xc0000000, 0x0},
	{0x2008, 0x01ffffff, 0x0000000},
	{0x0c1c, 0x00000004, 0x0},
	{0x0700, 0x08000000, 0x0},
	{0x0c70, 0x0000001f, 0x03},
	{0x0c70, 0x000003e0, 0x03},
	{0x12a0, 0x000ff000, 0x00},
	{0x32a0, 0x000ff000, 0x00},
	{0x0700, 0x07000000, 0x0},
	{0x20fc, 0xffff0000, 0x0000},
	{0x58c8, 0x01000000, 0x0},
	{0x78c8, 0x01000000, 0x0},
	{0x0c3c, 0x00000200, 0x0},
	{0x2344, 0x80000000, 0x0},
};
174
175static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
176{
177 u32 i;
178
179 for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
180 backup_bb_reg_val[i] =
181 rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
182 MASKDWORD);
183 rtw89_debug(rtwdev, RTW89_DBG_RFK,
184 "[RFK]backup bb reg : %x, value =%x\n",
185 rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
186 }
187}
188
189static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
190 u8 rf_path)
191{
192 u32 i;
193
194 for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
195 backup_rf_reg_val[i] =
196 rtw89_read_rf(rtwdev, rf_path,
197 rtw8852b_backup_rf_regs[i], RFREG_MASK);
198 rtw89_debug(rtwdev, RTW89_DBG_RFK,
199 "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
200 rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
201 }
202}
203
204static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
205 const u32 backup_bb_reg_val[])
206{
207 u32 i;
208
209 for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
210 rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
211 MASKDWORD, backup_bb_reg_val[i]);
212 rtw89_debug(rtwdev, RTW89_DBG_RFK,
213 "[RFK]restore bb reg : %x, value =%x\n",
214 rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
215 }
216}
217
218static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
219 const u32 backup_rf_reg_val[], u8 rf_path)
220{
221 u32 i;
222
223 for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
224 rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i],
225 RFREG_MASK, backup_rf_reg_val[i]);
226
227 rtw89_debug(rtwdev, RTW89_DBG_RFK,
228 "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
229 rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
230 }
231}
232
233static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
234 enum rtw89_rf_path path, bool is_bybb)
235{
236 if (is_bybb)
237 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
238 else
239 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
240}
241
242static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
243 enum rtw89_rf_path path, bool is_bybb)
244{
245 if (is_bybb)
246 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
247 else
248 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
249}
250
/* Wait for a one-shot IQK command to complete and report its result.
 * Polls NCTL status (0xbff8) for the 0x55 done pattern (up to 8.2 ms),
 * then reads the fail flag from R_NCTL_RPT.
 * Returns true when the calibration failed or the poll timed out.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail = true;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(200);

	/* only trust the HW fail flag when the done poll succeeded */
	if (!ret)
		fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
	/* clear the NCTL done indication for the next one-shot */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);

	return fail;
}
274
275static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
276{
277 u8 val;
278
279 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
280 rtwdev->dbcc_en, phy_idx);
281
282 if (!rtwdev->dbcc_en) {
283 val = RF_AB;
284 } else {
285 if (phy_idx == RTW89_PHY_0)
286 val = RF_A;
287 else
288 val = RF_B;
289 }
290 return val;
291}
292
/* Trigger one RX DCK (RX DC offset calibration) run on @path: clear the
 * DCK result latch, pulse the level-trigger bit 0 -> 1, then allow 1 ms
 * for the calibration to run.
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}
301
/* Run RX DCK on every RF path: save RR_RSV1 and the DCK fine-tune value,
 * force the path into RX mode, trigger the calibration, then restore the
 * saved state.  TSSI tracking is toggled around the calibration when TSSI
 * mode is active (NOTE(review): 0x1 here appears to pause tracking --
 * confirm against the TSSI register spec).
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852B_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		/* preserve state that the calibration clobbers */
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		/* direct RF control, clear fine-tune, force RX mode */
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		/* restore fine-tune value and RR_RSV1 */
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
333
/* Run RC calibration (RCK) on @path: force RX mode, trigger the
 * calibration via RR_RCKC, poll RR_RCKS bit 3 for completion (up to
 * 30 us), then write the resulting capacitor code back and restore
 * RR_RSV1.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	/* read back the calibrated capacitor code (used even on timeout) */
	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
368
/* Initialize the AFE for calibration: enable PHY register access via
 * R_AX_PHYREG_SET, then apply the chip's AFE init table.
 */
static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);

	rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
}
375
/* Run the digital-die RC calibration (DRCK): kick the calibration, poll
 * for the done flag (up to 10 ms), latch the result, then copy it into
 * the manual-value field and switch off the hardware-select path.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
	/* pulse the latch to capture the calibration result */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
	/* use the manual code read back from the calibration */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
}
401
/* Save the per-path ADC DCK results into dack->addck_d[path][i/q].
 * B_ADDCKx is cleared before reading back the two result fields of
 * each path.
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
}
414
415static void _addck_reload(struct rtw89_dev *rtwdev)
416{
417 struct rtw89_dack_info *dack = &rtwdev->dack;
418
419 /* S0 */
420 rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
421 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
422 rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
423 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);
424
425 /* S1 */
426 rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
427 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6);
428 rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
429 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
430}
431
/* Save all S0 DACK results (per-index MSBK values, bias-K and DADCK for
 * both I and Q) into rtwdev->dack.  Debug read mode (B_P0_NRBW_DBG) is
 * left enabled; the caller disables it afterwards.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read its MSBK value for each chain */
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}
458
/* Save all S1 DACK results (per-index MSBK values, bias-K and DADCK for
 * both I and Q) into rtwdev->dack.  Debug mode (B_P1_DBGMOD_ON) is left
 * enabled; the caller disables it afterwards.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read its MSBK value for each chain */
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}
485
/* Measure and log the mean ADC DC offset for @path.  Takes ADDC_T_AVG
 * reads of the debug register; each 32-bit sample packs the real part in
 * bits 23:12 and the imaginary part in bits 11:0, both as 12-bit
 * two's-complement values.
 */
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	/* route the selected path's ADC output to the debug read port */
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_addc_defs_a_tbl,
				 &rtw8852b_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}
508
/* Run ADC DC offset calibration (ADDCK) on both paths.  For each path:
 * put the analog front end into the calibration configuration, log the
 * DC offset before calibration, trigger ADDCK and poll for completion
 * (up to 10 ms), log the offset after calibration, then restore the
 * analog configuration.  A timeout is recorded in dack->addck_timeout[].
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	/* pulse the trigger, then start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	/* restore S0 analog configuration */
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	/* pulse the trigger, then start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	/* restore S1 analog configuration */
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
585
/* Measure the DADC DC offset on @path: enable the DADC check path via the
 * per-path table, reuse _check_addc() for the averaged measurement, then
 * disable the check path again.
 */
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_en_defs_a_tbl,
				 &rtw8852b_check_dadc_en_defs_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_dis_defs_a_tbl,
				 &rtw8852b_check_dadc_dis_defs_b_tbl);
}
598
/* Poll helper for S0 DACK: return true only when BOTH done flags of the
 * requested stage are set.  @part1 selects the first stage (P0/P1 flags,
 * polled during MSBK); otherwise the second stage (P2/P3 flags, polled
 * during DADCK) is checked.
 */
static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
			return false;
	}

	return true;
}
613
/* Run the S0 DAC calibration: apply the stage-1 table and poll for MSBK
 * completion, apply the stage-2 table and poll for DADCK completion
 * (10 ms each, timeouts recorded in rtwdev->dack), apply the stage-3
 * table, then back up the results and leave debug mode.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
647
648static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
649{
650 if (part1) {
651 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 &&
652 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
653 return false;
654 } else {
655 if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 &&
656 rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
657 return false;
658 }
659
660 return true;
661}
662
/* Run the S1 DAC calibration: apply the stage-1 table and poll for MSBK
 * completion, apply the stage-2 table and poll for DADCK completion
 * (10 ms each, timeouts recorded in rtwdev->dack), apply the stage-3
 * table, verify the DADC offset, then back up the results and leave
 * debug mode.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_check_dadc(rtwdev, RF_PATH_B);
	_dack_backup_s1(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
697
/* Run the DAC calibration on both paths, S0 first then S1. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
703
704static void _dack_dump(struct rtw89_dev *rtwdev)
705{
706 struct rtw89_dack_info *dack = &rtwdev->dack;
707 u8 i;
708 u8 t;
709
710 rtw89_debug(rtwdev, RTW89_DBG_RFK,
711 "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
712 dack->addck_d[0][0], dack->addck_d[0][1]);
713 rtw89_debug(rtwdev, RTW89_DBG_RFK,
714 "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
715 dack->addck_d[1][0], dack->addck_d[1][1]);
716 rtw89_debug(rtwdev, RTW89_DBG_RFK,
717 "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
718 dack->dadck_d[0][0], dack->dadck_d[0][1]);
719 rtw89_debug(rtwdev, RTW89_DBG_RFK,
720 "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
721 dack->dadck_d[1][0], dack->dadck_d[1][1]);
722 rtw89_debug(rtwdev, RTW89_DBG_RFK,
723 "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
724 dack->biask_d[0][0], dack->biask_d[0][1]);
725 rtw89_debug(rtwdev, RTW89_DBG_RFK,
726 "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
727 dack->biask_d[1][0], dack->biask_d[1][1]);
728
729 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
730 for (i = 0; i < 0x10; i++) {
731 t = dack->msbk_d[0][0][i];
732 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
733 }
734
735 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
736 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
737 t = dack->msbk_d[0][1][i];
738 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
739 }
740
741 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
742 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
743 t = dack->msbk_d[1][0][i];
744 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
745 }
746
747 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
748 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
749 t = dack->msbk_d[1][1][i];
750 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
751 }
752}
753
/* Top-level DAC calibration: init the AFE, run DRCK, ADDCK (with
 * backup/reload of the results) and DACK, then restore the RF mode
 * registers and re-enable BB control of both paths.
 * NOTE(review): @force is currently unused in this function -- confirm
 * whether callers rely on it.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	/* preserve the RF mode of both paths across the calibration */
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
789
/* Prepare the RF front end of @path for RX IQK: set mode 0xc, assert the
 * band-specific RXK select bit, and mirror the current channel config
 * (RR_CFGCH) into RR_RSV4.  No-op for bands other than 2G/5G.
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	default:
		break;
	}
}
812
/* Issue a single one-shot IQK command of type @ktype for @path: build the
 * NCTL command word (bit 4+path selects the path; for TXK/RXK bits 11:8
 * encode the channel bandwidth), write it to R_NCTL_CFG and wait for
 * completion via _iqk_check_cal().
 * Returns true when the calibration FAILED (or @ktype is unknown... then
 * false is returned without issuing a command).
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	/* +1 sets the start bit of the NCTL command */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return fail;
}
867
/* Wideband RX IQK for @path: for each of the RTW8852B_RXK_GROUP_NR gain
 * groups, program the band-specific RX gain/attenuation, select the CFIR
 * LUT entry for the group and fire a one-shot RXK.  Per-group fail bits
 * are mirrored into R_IQKINF.  On any failure the narrowband fallback
 * CFIR value is kept; otherwise the wideband result is enabled.
 * Returns true if any group failed.
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
				       _g_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
				       _g_idxattc1[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
				       _a_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
				       _a_idxattc1[gp]);
			break;
		default:
			break;
		}

		/* point the CFIR LUT at this group's entry */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
		kfail |= fail;
	}
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (kfail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x0);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x5);
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return kfail;
}
925
/* Narrowband RX IQK for @path using only the last gain group (gp = 3):
 * program the band-specific RX gain/attenuation, select the CFIR LUT
 * entry, set the RXK PLL, fire a one-shot NBRXK and record the result.
 * On success the read-back RXIQC value (with bit 1 set) is stored as the
 * narrowband CFIR; otherwise the fallback constant is used.
 * Returns true on failure.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		       u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const u8 gp = 0x3;
	bool kfail = false;
	bool fail;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
			       _g_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
			       _g_idxattc1[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
			       _a_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
			       _a_idxattc1[gp]);
		break;
	default:
		break;
	}

	/* point the CFIR LUT at the group entry and set the RXK PLL */
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	udelay(1);

	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	kfail |= fail;
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (!kfail)
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	return kfail;
}
974
975static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
976{
977 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
978
979 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
980 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
981 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
982 udelay(1);
983 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
984 udelay(1);
985 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
986 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
987 udelay(1);
988 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
989 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2);
990 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
991 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2);
992 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
993 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
994 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
995 } else {
996 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
997 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
998 udelay(1);
999 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
1000 udelay(1);
1001 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
1002 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
1003 udelay(1);
1004 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
1005 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1);
1006 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
1007 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1);
1008 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
1009 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
1010 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0);
1011 }
1012}
1013
/* Wideband TX IQK: run the one-shot TX calibration for every TX gain
 * group on @path, recording a per-group pass/fail bit in R_IQKINF.
 *
 * Returns true if any group failed; on failure a default narrow-band
 * TX CFIR coefficient is installed and hardware TX CFIR is disabled.
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	/* NOTE(review): the TX sweep reuses RTW8852B_RXK_GROUP_NR (4) as its
	 * group count — matches vendor code; the TX tables have 4 entries too.
	 */
	for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		/* Per-band TX gain settings and KIP I/Q parameters */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _a_itqt[gp]);
			break;
		default:
			break;
		}

		/* Point the CFIR LUT at this group, then fire the one-shot TXK */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		/* TX result bits live at bit 8 onward, 4 bits per path */
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(8 + gp + path * 4), fail);
		kfail |= fail;
	}

	if (kfail) {
		/* Fall back to default NB coefficient, disable TX CFIR */
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x5);
		iqk_info->is_wb_txiqk[path] = true;
	}

	return kfail;
}
1076
/* Narrow-band TX IQK: calibrate only gain group 3 on @path instead of
 * sweeping all groups (cf. _txk_group_sel()).
 *
 * On success the NB TX CFIR coefficient is read back from hardware
 * (with bit 1 set); on failure a default value is stored instead.
 * Returns true on failure.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail;
	u8 gp = 0x3;

	/* Per-band TX gain settings and KIP I/Q parameters for group 3 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _a_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	if (!kfail)
		/* Read back the calibrated coefficient; bit 1 marks NB result */
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	return kfail;
}
1124
1125static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
1126{
1127 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1128
1129 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1130 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
1131
1132 rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
1133 if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1134 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
1135 else
1136 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
1137 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
1138 rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
1139 rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);
1140
1141 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
1142 rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
1143}
1144
1145static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1146{
1147 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1148 bool is_fail1, is_fail2;
1149 u32 vbuff_i;
1150 u32 vbuff_q;
1151 u32 core_i;
1152 u32 core_q;
1153 u32 tmp;
1154 u8 ch;
1155
1156 tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
1157 core_i = FIELD_GET(RR_TXMO_COI, tmp);
1158 core_q = FIELD_GET(RR_TXMO_COQ, tmp);
1159 ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;
1160
1161 if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1162 is_fail1 = true;
1163 else
1164 is_fail1 = false;
1165
1166 iqk_info->lok_idac[ch][path] = tmp;
1167
1168 tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
1169 vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
1170 vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);
1171
1172 if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
1173 is_fail2 = true;
1174 else
1175 is_fail2 = false;
1176
1177 iqk_info->lok_vbuf[ch][path] = tmp;
1178
1179 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1180 "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
1181 iqk_info->lok_idac[ch][path]);
1182 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1183 "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
1184 iqk_info->lok_vbuf[ch][path]);
1185
1186 return is_fail1 | is_fail2;
1187}
1188
/* LO leakage calibration (LOK) on @path: coarse pass, vbuffer pass,
 * fine pass, then a final vbuffer pass, alternating the TX gain
 * between 0x0 (core stages) and 0x12 (vbuffer stages).
 *
 * Returns true if _lok_finetune_check() reports out-of-range results.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);

	/* Band-specific TX tracking range; power range is 0 on both bands */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		break;
	default:
		break;
	}

	/* NOTE(review): the remaining band switches write identical values
	 * for 2G and 5G — kept as-is to mirror the vendor sequence.
	 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	/* Coarse LOK */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	/* Vbuffer LOK after coarse pass */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	/* Fine LOK */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	/* Final vbuffer pass; its one-shot result is intentionally unused */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	_iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	return _lok_finetune_check(rtwdev, path);
}
1270
/* Prepare the RF front end on @path for TX IQK: band-specific
 * attenuator/LNA bias setup, LOK LUT enable, and switching the RF
 * mode register into the IQK state (0x403e).
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		/* LUT address 0x00 selects the 2 GHz LOK entry */
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		/* LUT address 0x80 selects the 5 GHz LOK entry */
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	default:
		break;
	}
}
1300
/* Configure DAC/analog clocks on both paths before TX IQK.
 * Fixed write sequence with settle delays; @path is unused because the
 * registers touched are common to both paths.
 */
static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
}
1313
/* Publish the per-path IQK outcome: record thermal, mirror the LOK/TXK/
 * RXK fail flags and the resulting CFIR/enable registers into iqk_info,
 * and update the per-path failure counter exposed via R_IQKINF2.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	iqk_info->thermal[path] =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
	iqk_info->thermal_rek_en = false;

	/* Mirror the individual fail flags into the R_IQKINF report bits;
	 * each path owns a 4-bit nibble (hence the path * 4 shift).
	 */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	/* Snapshot the hardware calibration results for later restore */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);

	/* Bump the fail counter if this path reported any failure bit */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
1348
/* Full IQK flow for one path: LOK (retried up to 3 times with an
 * increasing bias), then TX IQK, then RX IQK — each in narrow-band or
 * wideband form depending on iqk_info->is_nbiqk — and finally publish
 * the results via _iqk_info_iqk().
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	const int try = 3;
	u8 ibias = 0x1;
	u8 i;

	_iqk_txclk_setting(rtwdev, path);

	/* LOK: retry with a larger bias current on each failure */
	for (i = 0; i < try; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}

	if (lok_is_fail)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);

	/* TXK */
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	/* RX */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}
1387
1388static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
1389{
1390 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1391 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1392 u32 reg_rf18;
1393 u32 reg_35c;
1394 u8 idx;
1395 u8 get_empty_table = false;
1396
1397 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1398 if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
1399 get_empty_table = true;
1400 break;
1401 }
1402 }
1403 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
1404
1405 if (!get_empty_table) {
1406 idx = iqk_info->iqk_table_idx[path] + 1;
1407 if (idx > 1)
1408 idx = 0;
1409 }
1410 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
1411
1412 reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1413 reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
1414
1415 iqk_info->iqk_band[path] = chan->band_type;
1416 iqk_info->iqk_bw[path] = chan->band_width;
1417 iqk_info->iqk_ch[path] = chan->channel;
1418 iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
1419 iqk_info->iqk_table_idx[path] = idx;
1420
1421 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
1422 path, reg_rf18, idx);
1423 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
1424 path, reg_rf18);
1425 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
1426 iqk_info->iqk_times, idx);
1427 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
1428 idx, path, iqk_info->iqk_mcc_ch[idx][path]);
1429
1430 if (reg_35c == 0x01)
1431 iqk_info->syn1to2 = 0x1;
1432 else
1433 iqk_info->syn1to2 = 0x0;
1434
1435 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1436 "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path,
1437 iqk_info->syn1to2);
1438
1439 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
1440 /* 2GHz/5GHz/6GHz = 0/1/2 */
1441 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
1442 iqk_info->iqk_band[path]);
1443 /* 20/40/80 = 0/1/2 */
1444 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
1445 iqk_info->iqk_bw[path]);
1446 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
1447 iqk_info->iqk_ch[path]);
1448}
1449
/* Thin entry point kept for flow symmetry: runs the per-path IQK. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1454
/* Restore state after IQK on @path: write the chosen NB TX/RX CFIR
 * coefficients into hardware, run the IQK-restore one-shot, then clear
 * the NCTL/KIP scratch registers and return the RF path to RX mode.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	/* Trigger the IQK-restore one-shot command for this path */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));
	fail = _iqk_check_cal(rtwdev, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result =%x\n", __func__, fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	/* NOTE(review): this RF write is issued twice in the vendor sequence;
	 * kept verbatim as the repeat may be an intentional settle write.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}
1482
1483static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1484 enum rtw89_phy_idx phy_idx, u8 path)
1485{
1486 const struct rtw89_reg3_def *def;
1487 int size;
1488 u8 kpath;
1489 int i;
1490
1491 rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
1492
1493 kpath = _kpath(rtwdev, phy_idx);
1494
1495 switch (kpath) {
1496 case RF_A:
1497 case RF_B:
1498 return;
1499 default:
1500 size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
1501 def = rtw8852b_restore_nondbcc_path01;
1502 break;
1503 }
1504
1505 for (i = 0; i < size; i++, def++)
1506 rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
1507}
1508
/* Prepare KIP/NCTL for IQK on @path: select the active coefficient
 * table, take the RF path out of reset mode, and enable the NCTL
 * report/KIP system configuration used during calibration.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx;

	idx = iqk_info->iqk_table_idx[path];
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);

	/* Select coefficient table slot chosen by _iqk_get_ch_info() */
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
}
1530
1531static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1532 enum rtw89_phy_idx phy_idx, u8 path)
1533{
1534 const struct rtw89_reg3_def *def;
1535 int size;
1536 u8 kpath;
1537 int i;
1538
1539 kpath = _kpath(rtwdev, phy_idx);
1540
1541 switch (kpath) {
1542 case RF_A:
1543 case RF_B:
1544 return;
1545 default:
1546 size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
1547 def = rtw8852b_set_nondbcc_path01;
1548 break;
1549 }
1550
1551 for (i = 0; i < size; i++, def++)
1552 rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
1553}
1554
1555static void _iqk_init(struct rtw89_dev *rtwdev)
1556{
1557 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1558 u8 idx, path;
1559
1560 rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1561 if (iqk_info->is_iqk_init)
1562 return;
1563
1564 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1565 iqk_info->is_iqk_init = true;
1566 iqk_info->is_nbiqk = false;
1567 iqk_info->iqk_fft_en = false;
1568 iqk_info->iqk_sram_en = false;
1569 iqk_info->iqk_cfir_en = false;
1570 iqk_info->iqk_xym_en = false;
1571 iqk_info->thermal_rek_en = false;
1572 iqk_info->iqk_times = 0x0;
1573
1574 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1575 iqk_info->iqk_channel[idx] = 0x0;
1576 for (path = 0; path < RTW8852B_IQK_SS; path++) {
1577 iqk_info->lok_cor_fail[idx][path] = false;
1578 iqk_info->lok_fin_fail[idx][path] = false;
1579 iqk_info->iqk_tx_fail[idx][path] = false;
1580 iqk_info->iqk_rx_fail[idx][path] = false;
1581 iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1582 iqk_info->iqk_table_idx[path] = 0x0;
1583 }
1584 }
1585}
1586
1587static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
1588{
1589 u32 rf_mode;
1590 u8 path;
1591 int ret;
1592
1593 for (path = 0; path < RF_PATH_MAX; path++) {
1594 if (!(kpath & BIT(path)))
1595 continue;
1596
1597 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
1598 rf_mode != 2, 2, 5000, false,
1599 rtwdev, path, RR_MOD, RR_MOD_MASK);
1600 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1601 "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
1602 }
1603}
1604
1605static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
1606 bool is_pause)
1607{
1608 if (!is_pause)
1609 return;
1610
1611 _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
1612}
1613
/* Run a complete IQK on one path: notify BTC, snapshot the channel
 * context, back up BB/RF registers, do the MAC/BB setup + preset +
 * calibration + restore sequence, then put everything back.
 * @force is currently unused (kept for API symmetry with other chips).
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	/* Tell coexistence that a one-shot WLAN RF calibration starts */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK strat!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->kcount = 0;
	iqk_info->version = RTW8852B_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);

	/* Back up, calibrate, then restore in reverse order */
	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1645
1646static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1647{
1648 u8 kpath = _kpath(rtwdev, phy_idx);
1649
1650 switch (kpath) {
1651 case RF_A:
1652 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1653 break;
1654 case RF_B:
1655 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1656 break;
1657 case RF_AB:
1658 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1659 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1660 break;
1661 default:
1662 break;
1663 }
1664}
1665
1666static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1667 u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
1668{
1669 u8 i;
1670
1671 for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
1672 reg_bkup[path][i] =
1673 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
1674 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1675 reg[i] + (path << 8), reg_bkup[path][i]);
1676 }
1677}
1678
1679static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1680 const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
1681{
1682 u8 i;
1683
1684 for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
1685 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD,
1686 reg_bkup[path][i]);
1687 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1688 reg[i] + (path << 8), reg_bkup[path][i]);
1689 }
1690}
1691
1692static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
1693{
1694 u8 order;
1695 u8 val;
1696
1697 order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
1698 val = 0x3 >> order;
1699
1700 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
1701
1702 return val;
1703}
1704
1705static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
1706{
1707 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1708 u8 val, kidx = dpk->cur_idx[path];
1709
1710 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
1711
1712 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
1713 MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);
1714
1715 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
1716 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
1717}
1718
/* Issue one DPK one-shot command @id on @path and wait for completion.
 *
 * Two polls: first for the NCTL handshake byte (0x55, up to 20 ms),
 * then for the KIP report word (0x8000, up to 2 ms).  Timeouts are
 * logged but not treated as fatal.
 */
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	/* Command word: id in the high byte, per-path trigger in the low */
	dpk_cmd = (id << 8) | (0x19 + (path << 4));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 20000, false,
				       rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");

	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       1, 2000, false,
				       rtwdev, 0x80fc, MASKLWORD);
	if (ret)
		/* NOTE(review): this poll times out after 2 ms, not 20 ms —
		 * the message text is inherited from the first poll.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" :
		    id == 0x14 ? "PWR_CAL" :
		    id == 0x15 ? "DPK_RXAGC" :
		    id == 0x16 ? "KIP_PRESET" :
		    id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
		    dpk_cmd);
}
1760
/* RX DC offset calibration used by DPK: enable the TIA/IDA DCK inputs
 * on @path, then run the common RX DCK routine.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path);
}
1767
/* Record the current channel context (band/channel/bandwidth) into the
 * DPK slot selected by cur_idx[path] and log the DPK environment.
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M")
}
1791
/* Apply the BB/AFE configuration needed for DPK via the parser table;
 * 80 MHz channels additionally widen the channel filter paths.
 */
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);

	/* Extra bandwidth-extension bits only for 80 MHz operation */
	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}
1808
/* Undo _dpk_bb_afe_setting(): replay the restore table and clear the
 * 80 MHz bandwidth-extension bits if they were set.
 */
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
	}
}
1825
1826static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1827 enum rtw89_rf_path path, bool is_pause)
1828{
1829 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1830 B_P0_TSSI_TRK_EN, is_pause);
1831
1832 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
1833 is_pause ? "pause" : "resume");
1834}
1835
/* Restore KIP registers after DPK via the parser table; chips newer
 * than CAV additionally set the DPD compensation-off bit.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1846
/* Run a loopback RX IQ calibration for DPK. The RF write sequence below is
 * order-critical: it saves the channel config, switches the RF mode to the
 * loopback state, tunes attenuation to the current RXBB gain, powers the RXK
 * PLL, fires the one-shot, then unwinds the temporary state.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;
	u32 tmp;

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	/* Stash the channel config in RR_RSV4 before entering loopback mode. */
	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* Pick TX IQK attenuation based on the current RXBB gain range. */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
	/* Wait for the RXK PLL to settle before triggering the one-shot. */
	udelay(70);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	/* Tear down the loopback state; 0x5 returns RF to its RX mode. */
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1890
/* Sample the RF thermal sensor and record it in the DPK backup slot @kidx;
 * _dpk_track() later compares against this value for thermal compensation.
 */
static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* Toggle the trigger bit 1-0-1 to start a fresh measurement. */
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	/* Allow the sensor to convert before reading the value. */
	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
1906
/* Configure the RF front-end for DPK on @path, with band-specific
 * attenuation/LNA settings (2G vs 5G/6G), then the shared TX-BB bandwidth.
 * @gain is currently unused here.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		/* Non-2G bands need additional LNA/IQ-generator setup. */
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX-BB bandwidth field is the stored bw enum + 1. */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}
1937
1938static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
1939 enum rtw89_rf_path path, bool is_bypass)
1940{
1941 if (is_bypass) {
1942 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1943 B_RXIQC_BYPASS2, 0x1);
1944 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1945 B_RXIQC_BYPASS, 0x1);
1946 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1947 "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
1948 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1949 MASKDWORD));
1950 } else {
1951 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
1952 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
1953 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1954 "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
1955 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1956 MASKDWORD));
1957 }
1958}
1959
1960static
1961void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
1962{
1963 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1964
1965 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
1966 rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
1967 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
1968 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
1969 else
1970 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
1971
1972 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
1973 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
1974 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
1975}
1976
1977static void _dpk_table_select(struct rtw89_dev *rtwdev,
1978 enum rtw89_rf_path path, u8 kidx, u8 gain)
1979{
1980 u8 val;
1981
1982 val = 0x80 + kidx * 0x20 + gain * 0x10;
1983 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
1984 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1985 "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
1986 gain, val);
1987}
1988
/* Read the SYNC report and decide whether it failed.
 * Returns true on failure: DC offset (I or Q) above threshold, or
 * correlation value below threshold.
 */
static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	/* Report page 0: correlation index/value. */
	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	/* Report page 9: DC I/Q offsets (12-bit signed fields). */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
2030
/* Fire a SYNC one-shot and check its report; returns true on failure. */
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path, kidx);
}
2038
/* Read the digital gain from report page 0 of the KIP report register. */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2051
2052static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2053{
2054 static const u16 bnd[15] = {
2055 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2056 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2057 };
2058 s8 offset;
2059
2060 if (dgain >= bnd[0])
2061 offset = 0x6;
2062 else if (bnd[0] > dgain && dgain >= bnd[1])
2063 offset = 0x6;
2064 else if (bnd[1] > dgain && dgain >= bnd[2])
2065 offset = 0x5;
2066 else if (bnd[2] > dgain && dgain >= bnd[3])
2067 offset = 0x4;
2068 else if (bnd[3] > dgain && dgain >= bnd[4])
2069 offset = 0x3;
2070 else if (bnd[4] > dgain && dgain >= bnd[5])
2071 offset = 0x2;
2072 else if (bnd[5] > dgain && dgain >= bnd[6])
2073 offset = 0x1;
2074 else if (bnd[6] > dgain && dgain >= bnd[7])
2075 offset = 0x0;
2076 else if (bnd[7] > dgain && dgain >= bnd[8])
2077 offset = 0xff;
2078 else if (bnd[8] > dgain && dgain >= bnd[9])
2079 offset = 0xfe;
2080 else if (bnd[9] > dgain && dgain >= bnd[10])
2081 offset = 0xfd;
2082 else if (bnd[10] > dgain && dgain >= bnd[11])
2083 offset = 0xfc;
2084 else if (bnd[11] > dgain && dgain >= bnd[12])
2085 offset = 0xfb;
2086 else if (bnd[12] > dgain && dgain >= bnd[13])
2087 offset = 0xfa;
2088 else if (bnd[13] > dgain && dgain >= bnd[14])
2089 offset = 0xf9;
2090 else if (bnd[14] > dgain)
2091 offset = 0xf8;
2092 else
2093 offset = 0x0;
2094
2095 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
2096
2097 return offset;
2098}
2099
/* Read the gain-loss index from report page 6 of the KIP report. */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2107
/* Run the gain-loss measurement one-shot (always with gain stage 1). */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}
2114
/* Preset the KIP engine: select the TPG mode, then fire KIP_PRESET. */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2121
/* Power up the KIP engine and enable its clocks for @path. */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2131
/* Write @txagc into RF and latch it into the KIP engine via the DPK_TXAGC
 * one-shot, bracketed by the RF-control-mode enable bit.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2142
/* Mirror the current RF mode register into KIP and run the RX AGC one-shot,
 * then select report page 8 so the resulting RXBB can be read back.
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	u32 tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
}
2160
2161static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2162 enum rtw89_rf_path path, s8 gain_offset)
2163{
2164 u8 txagc;
2165
2166 txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
2167
2168 if (txagc - gain_offset < DPK_TXAGC_LOWER)
2169 txagc = DPK_TXAGC_LOWER;
2170 else if (txagc - gain_offset > DPK_TXAGC_UPPER)
2171 txagc = DPK_TXAGC_UPPER;
2172 else
2173 txagc = txagc - gain_offset;
2174
2175 _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2176
2177 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2178 gain_offset, txagc);
2179 return txagc;
2180}
2181
/* Read the PA scan report.
 * With @is_check set, sample points 0x00 and 0x1f and return true when the
 * power ratio |p0|^2 / |p31|^2 reaches 8/5 (used by the AGC to detect a
 * gain-loss criterion). Without @is_check, dump all 32 points for debug.
 *
 * NOTE(review): in the !is_check path val1/val2 stay 0, so the final
 * comparison makes the function return true; callers in this file appear to
 * only use the return value with is_check=true — confirm before relying on it.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		/* Sample point 0: I/Q are 12-bit signed halves of the report. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		/* Sample point 31. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		/* Debug dump of all 32 PA scan points. */
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2222
/* DPK automatic gain control state machine.
 *
 * Starting from @init_txagc, iterate SYNC / RXBB adjustment / gain-loss
 * measurement until a TXAGC is found whose gain-loss index is in range, or
 * a bound is hit. Returns the final TXAGC, or 0xff when SYNC fails.
 * @loss_only skips the RXBB-adjustment step. Iteration is capped by both
 * agc_cnt (6) and a hard "limit" counter (200) as a safety net.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
	u16 dgain = 0;
	s8 offset;
	int limit = 200;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* SYNC failure aborts the whole AGC with 0xff. */
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* Once RXBB hit a rail, stop adjusting it. */
			if (loss_only == 1 || limited_rxbb == 1)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			/* Move RXBB by the dgain-derived offset, clamped to
			 * [0, 0x1f]; hitting a rail sets limited_rxbb.
			 */
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
						 RFREG_MASKRXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
				       tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
			/* RXBB changed (or first pass): refresh the RX IQC -
			 * bypass the CFIR below 80 MHz, else rerun LBK RXIQK.
			 */
			if (offset || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			/* dgain far out of range: resync before gain-loss. */
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			/* GL too large (or PA-scan criterion met at GL==0):
			 * lower TXAGC; GL==0 otherwise: raise TXAGC.
			 */
			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* Lower TXAGC by 3, unless already at the floor. */
			if (tmp_txagc == 0x2e) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* Raise TXAGC by 2 (offset -2), unless at the ceiling. */
			if (tmp_txagc == 0x3f) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;
		case DPK_AGC_STEP_SET_TX_GAIN:
			/* In-range gain-loss: apply it as the final offset. */
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}
2341
2342static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2343{
2344 switch (order) {
2345 case 0:
2346 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2347 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
2348 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
2349 break;
2350 case 1:
2351 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2352 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2353 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2354 break;
2355 case 2:
2356 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2357 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2358 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2359 break;
2360 default:
2361 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2362 "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2363 break;
2364 }
2365
2366 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2367 "[DPK] Set MDPD order to 0x%x for IDL\n", order);
2368}
2369
/* Run the IDL (iterative digital loop) MDPK phase: pick the MDPD order
 * (2 for narrow-band 5G, else 0) and fire the MDPK_IDL one-shot.
 * @gain is currently unused here.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2383
/* Commit the DPK results (txagc, power scaling factor, gain scaling) into
 * the hardware DPD registers and mirror them into the dpk backup for @kidx.
 * The shifted masks place the fields in the slot selected by kidx/gain.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const u16 pwsf = 0x78;
	u8 gs = dpk->dpk_gs[phy];

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
			       B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse the load-coefficient bit to latch the MDPD coefficients. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	/* Gain-scaling pattern depends on whether backoff moved to BB
	 * (dpk_gs == 0x7f, see _set_dpd_backoff()).
	 */
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}
2422
/* Check whether a stored DPK result matches the current band/channel; if
 * so, select that coefficient slot in hardware and make it current.
 * Returns true when a result was reloaded.
 */
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		/* Match: point the HW coefficient selector at this slot. */
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}
2449
/* Full DPK flow for one RF path: KIP power-up, RF/RX setup, AGC search for
 * TXAGC, then (on success) thermal snapshot, IDL phase and result commit.
 * Returns true on failure (AGC returned 0xff). Call order matters.
 */
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0x38, kidx = dpk->cur_idx[path];	/* 0x38: initial TXAGC guess */
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);

	_rfk_rf_direct_cntrl(rtwdev, path, false);
	_rfk_drf_direct_cntrl(rtwdev, path, false);

	_dpk_kip_pwr_clk_on(rtwdev, path);
	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);

	_dpk_kip_preset(rtwdev, phy, path, kidx);
	_dpk_kip_set_rxagc(rtwdev, phy, path);
	_dpk_table_select(rtwdev, path, kidx, gain);

	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);

	if (txagc == 0xff) {
		/* AGC could not converge (SYNC failed). */
		is_fail = true;
	} else {
		_dpk_get_thermal(rtwdev, kidx, path);

		_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);

		/* Return RF to RX mode before committing results. */
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

		_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
	}

	if (!is_fail)
		dpk->bp[path][kidx].path_ok = true;
	else
		dpk->bp[path][kidx].path_ok = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}
2497
/* Orchestrate DPK over all RF paths: try to reload cached results, back up
 * BB/RF/KIP state, run _dpk_main() per path, then restore everything and
 * resume TSSI where it was paused.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
	u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
	u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
	u8 path;

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			/* No cached match but a prior result exists: flip to
			 * the other backup slot for a fresh calibration.
			 */
			if (!reloaded[path] && dpk->bp[path][0].ch)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): here `path` has run off the loop (== RTW8852B_DPK_RF_PATH);
	 * _dpk_bb_afe_setting/_restore only use it for nothing beyond their
	 * signature, configuring via kpath instead — confirm intentional.
	 */
	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2550
2551static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2552{
2553 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2554 struct rtw89_fem_info *fem = &rtwdev->fem;
2555
2556 if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2557 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2558 "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2559 return true;
2560 } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2561 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2562 "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2563 return true;
2564 } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2565 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2566 "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2567 return true;
2568 }
2569
2570 return false;
2571}
2572
2573static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2574{
2575 u8 path, kpath;
2576
2577 kpath = _kpath(rtwdev, phy);
2578
2579 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
2580 if (kpath & BIT(path))
2581 _dpk_onoff(rtwdev, path, true);
2582 }
2583}
2584
2585static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
2586{
2587 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2588 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2589 RTW8852B_DPK_VER, rtwdev->hal.cv,
2590 RTW8852B_RF_REL_VERSION);
2591
2592 if (_dpk_bypass_check(rtwdev, phy))
2593 _dpk_force_bypass(rtwdev, phy);
2594 else
2595 _dpk_cal_select(rtwdev, force, phy, RF_AB);
2596}
2597
/* Periodic DPK thermal tracking: compare the current thermal reading with
 * the one captured at calibration time and adjust the per-path power
 * scaling factors (pwsf) accordingly, unless tracking is disabled in HW.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* Only track when a calibration exists for this channel. */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* Band-dependent scaling of the thermal delta. */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 0x0000003f);

		if (rtwdev->is_tssi_mode[path]) {
			/* TSSI mode: fold the BB txagc offset into pwsf. */
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			/* HW txagc-offset mode handles the offset itself. */
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}

		} else {
			/* Non-TSSI: apply only the thermal delta (9-bit field). */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* Commit only when tracking is enabled and TX is active. */
		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2701
/* Decide where the DPD power backoff lives: when the OFDM backoff plus TX
 * scale is large enough, move the backoff into BB (gain scale 0x7f, DPD
 * backoff zeroed); otherwise keep the default gain scale 0x5b.
 */
static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 tx_scale, ofdm_bkof, path, kpath;

	kpath = _kpath(rtwdev, phy);

	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);

	if (ofdm_bkof + tx_scale >= 44) {
		/* move dpd backoff to bb, and set dpd backoff to 0 */
		dpk->dpk_gs[phy] = 0x7f;
		for (path = 0; path < RF_PATH_NUM_8852B; path++) {
			if (!(kpath & BIT(path)))
				continue;

			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
					       B_DPD_CFG, 0x7f7f7f);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
		}
	} else {
		dpk->dpk_gs[phy] = 0x5b;
	}
}
2728
2729static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2730 enum rtw89_rf_path path)
2731{
2732 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2733 enum rtw89_band band = chan->band_type;
2734
2735 if (band == RTW89_BAND_2G)
2736 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2737 else
2738 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2739}
2740
/* Apply the TSSI system register tables: the common table first, then the
 * per-path, per-band (2G vs 5G) variant.
 */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_a_defs_2g_tbl,
					 &rtw8852b_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_b_defs_2g_tbl,
					 &rtw8852b_tssi_sys_b_defs_5g_tbl);
}
2758
/* Initialize BB TX-power control for TSSI using the per-path table. */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_defs_b_tbl);
}
2767
/* Initialize BB TX-power control for HE trigger-based PPDUs (per path). */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
}
2776
/* Apply the TSSI DCK (DC calibration) register table for @path. */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_dck_defs_a_tbl,
				 &rtw8852b_tssi_dck_defs_b_tbl);
}
2784
2785static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2786 enum rtw89_rf_path path)
2787{
2788#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
2789({ \
2790 s8 *__ptr = (ptr); \
2791 u8 __idx = (idx), __i, __v; \
2792 u32 __val = 0; \
2793 for (__i = 0; __i < 4; __i++) { \
2794 __v = (__ptr[__idx + __i]); \
2795 __val |= (__v << (8 * __i)); \
2796 } \
2797 __val; \
2798})
2799 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2800 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2801 u8 ch = chan->channel;
2802 u8 subband = chan->subband_type;
2803 const s8 *thm_up_a = NULL;
2804 const s8 *thm_down_a = NULL;
2805 const s8 *thm_up_b = NULL;
2806 const s8 *thm_down_b = NULL;
2807 u8 thermal = 0xff;
2808 s8 thm_ofst[64] = {0};
2809 u32 tmp = 0;
2810 u8 i, j;
2811
2812 switch (subband) {
2813 default:
2814 case RTW89_CH_2G:
2815 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
2816 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
2817 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
2818 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
2819 break;
2820 case RTW89_CH_5G_BAND_1:
2821 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
2822 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
2823 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
2824 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
2825 break;
2826 case RTW89_CH_5G_BAND_3:
2827 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
2828 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
2829 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
2830 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
2831 break;
2832 case RTW89_CH_5G_BAND_4:
2833 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
2834 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
2835 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
2836 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
2837 break;
2838 }
2839
2840 if (path == RF_PATH_A) {
2841 thermal = tssi_info->thermal[RF_PATH_A];
2842
2843 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2844 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
2845
2846 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
2847 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
2848
2849 if (thermal == 0xff) {
2850 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
2851 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
2852
2853 for (i = 0; i < 64; i += 4) {
2854 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
2855
2856 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2857 "[TSSI] write 0x%x val=0x%08x\n",
2858 R_P0_TSSI_BASE + i, 0x0);
2859 }
2860
2861 } else {
2862 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
2863 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
2864 thermal);
2865
2866 i = 0;
2867 for (j = 0; j < 32; j++)
2868 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2869 -thm_down_a[i++] :
2870 -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
2871
2872 i = 1;
2873 for (j = 63; j >= 32; j--)
2874 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2875 thm_up_a[i++] :
2876 thm_up_a[DELTA_SWINGIDX_SIZE - 1];
2877
2878 for (i = 0; i < 64; i += 4) {
2879 tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
2880 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
2881
2882 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2883 "[TSSI] write 0x%x val=0x%08x\n",
2884 0x5c00 + i, tmp);
2885 }
2886 }
2887 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
2888 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
2889
2890 } else {
2891 thermal = tssi_info->thermal[RF_PATH_B];
2892
2893 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2894 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
2895
2896 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
2897 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
2898
2899 if (thermal == 0xff) {
2900 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
2901 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
2902
2903 for (i = 0; i < 64; i += 4) {
2904 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
2905
2906 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2907 "[TSSI] write 0x%x val=0x%08x\n",
2908 0x7c00 + i, 0x0);
2909 }
2910
2911 } else {
2912 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
2913 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
2914 thermal);
2915
2916 i = 0;
2917 for (j = 0; j < 32; j++)
2918 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2919 -thm_down_b[i++] :
2920 -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
2921
2922 i = 1;
2923 for (j = 63; j >= 32; j--)
2924 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2925 thm_up_b[i++] :
2926 thm_up_b[DELTA_SWINGIDX_SIZE - 1];
2927
2928 for (i = 0; i < 64; i += 4) {
2929 tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
2930 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
2931
2932 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2933 "[TSSI] write 0x%x val=0x%08x\n",
2934 0x7c00 + i, tmp);
2935 }
2936 }
2937 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
2938 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
2939 }
2940#undef RTW8852B_TSSI_GET_VAL
2941}
2942
2943static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2944 enum rtw89_rf_path path)
2945{
2946 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2947 &rtw8852b_tssi_dac_gain_defs_a_tbl,
2948 &rtw8852b_tssi_dac_gain_defs_b_tbl);
2949}
2950
2951static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2952 enum rtw89_rf_path path)
2953{
2954 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2955 enum rtw89_band band = chan->band_type;
2956
2957 if (path == RF_PATH_A)
2958 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2959 &rtw8852b_tssi_slope_a_defs_2g_tbl,
2960 &rtw8852b_tssi_slope_a_defs_5g_tbl);
2961 else
2962 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2963 &rtw8852b_tssi_slope_b_defs_2g_tbl,
2964 &rtw8852b_tssi_slope_b_defs_5g_tbl);
2965}
2966
/* Apply the default TSSI alignment table that matches the current path,
 * band and channel range.  @all selects the "all" variant of the table
 * over the "part" variant.  Channels outside the handled ranges leave
 * @tbl NULL and no table is applied.
 */
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		/* NOTE(review): path A keys its 2G case off band_type while
		 * path B below uses the 1..14 channel range — presumably
		 * equivalent, but worth confirming for edge configurations.
		 */
		if (band == RTW89_BAND_2G) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
		}
	} else {
		if (ch >= 1 && ch <= 14) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
		}
	}

	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3024
3025static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3026 enum rtw89_rf_path path)
3027{
3028 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3029 &rtw8852b_tssi_slope_defs_a_tbl,
3030 &rtw8852b_tssi_slope_defs_b_tbl);
3031}
3032
3033static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3034 enum rtw89_rf_path path)
3035{
3036 if (path == RF_PATH_A)
3037 rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3038 else
3039 rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3040}
3041
3042static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3043 enum rtw89_phy_idx phy,
3044 enum rtw89_rf_path path)
3045{
3046 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
3047 path);
3048
3049 if (path == RF_PATH_A)
3050 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
3051 else
3052 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
3053}
3054
/* Enable TSSI (transmitter signal strength indicator) power tracking on
 * both RF paths: enable tracking, toggle the averaging block, enable RF
 * gain tracking, and pulse the offset-enable bit to latch the default
 * offset (0xc0).  Sets rtwdev->is_tssi_mode[] as a side effect.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852B; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			/* Toggle 0 -> 1 to restart the TSSI averaging block. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			/* Program the default offset, then pulse OFT_EN to
			 * latch it.
			 */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			/* Same sequence as path A, on the P1 registers. */
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3106
3107static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3108{
3109 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
3110 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
3111 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
3112 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
3113 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
3114 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
3115
3116 rtwdev->is_tssi_mode[RF_PATH_A] = false;
3117 rtwdev->is_tssi_mode[RF_PATH_B] = false;
3118}
3119
/* Map a 2G channel to its CCK TSSI group index (0..5).
 * Channels outside 1..14 fall back to group 0.
 */
static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	if (ch >= 1 && ch <= 2)
		return 0;
	if (ch >= 3 && ch <= 5)
		return 1;
	if (ch >= 6 && ch <= 8)
		return 2;
	if (ch >= 9 && ch <= 11)
		return 3;
	if (ch == 12 || ch == 13)
		return 4;
	if (ch == 14)
		return 5;

	return 0;
}
3139
/* An "extra" group marks a channel that falls between two calibrated TSSI
 * groups; callers average the DE values of the two neighbouring groups
 * (see _tssi_get_ofdm_de()).  Bit 31 of the group index flags this case;
 * IDX1/IDX2 recover the indices of the two neighbouring groups.
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3145
/* Map a channel to its OFDM TSSI group index.  Channels lying between two
 * calibrated groups return TSSI_EXTRA_GROUP(n), meaning "average groups n
 * and n+1".  Unknown channels fall back to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3213
/* Map a channel to its efuse TSSI-trim group (0..7).  Channels outside
 * the known ranges (including the gaps between bands) map to group 0.
 */
static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	if (ch >= 1 && ch <= 8)
		return 0;
	if (ch >= 9 && ch <= 14)
		return 1;
	if (ch >= 36 && ch <= 48)
		return 2;
	if (ch >= 52 && ch <= 64)
		return 3;
	if (ch >= 100 && ch <= 112)
		return 4;
	if (ch >= 116 && ch <= 128)
		return 5;
	if (ch >= 132 && ch <= 144)
		return 6;
	if (ch >= 149 && ch <= 177)
		return 7;

	return 0;
}
3237
/* Look up the OFDM (MCS) TSSI DE value for the current channel.  For an
 * "extra" group (a channel between two calibrated groups) the result is
 * the average of the two neighbouring groups' DE values.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		/* Between two calibrated groups: average their DE values. */
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}
3273
/* Look up the efuse TSSI trim DE for the current channel; averaging over
 * neighbouring trim groups applies just like in _tssi_get_ofdm_de().
 */
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st;
	s8 tde_2nd;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		/* NOTE(review): _tssi_get_trim_group() never returns an
		 * extra group today, so this branch looks unreachable;
		 * kept for symmetry with _tssi_get_ofdm_de().
		 */
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}
3311
/* Program the per-channel TSSI DE (power offset) registers from the efuse
 * calibration data: for each path, write CCK DE (group value + trim) into
 * the long/short CCK registers and OFDM DE (group value + trim) into all
 * MCS bandwidth variants.  Only bits [21:12] (_TSSI_DE_MASK) are written.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		/* CCK DE = efuse CCK group value + efuse trim. */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS DE = group DE + efuse trim, applied to every
		 * bandwidth variant register.
		 */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3366
/* Dump the eight TSSI PA-K / alignment registers of @path to the RFK debug
 * log.  (path << 13) is the path-B register offset; the single rtw89_debug
 * call keeps the dump as one log entry.
 */
static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
}
3389
/* Re-apply previously computed TSSI alignment values (from a completed
 * _tssi_alimentk() run) for the band bucket the current channel falls in,
 * then dump the resulting registers.  Channels outside the known ranges
 * fall back to the 2G bucket.
 */
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;
	u8 band;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d path=%d\n", __func__, phy, path);

	/* Channel -> alignment-calibration band bucket. */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	if (tssi_info->alignment_done[path][band]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][3]);
	}

	_tssi_alimentk_dump_result(rtwdev, path);
}
3425
3426static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3427 enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
3428 u8 enable)
3429{
3430 enum rtw89_rf_path_bit rx_path;
3431
3432 if (path == RF_PATH_A)
3433 rx_path = RF_A;
3434 else if (path == RF_PATH_B)
3435 rx_path = RF_B;
3436 else if (path == RF_PATH_AB)
3437 rx_path = RF_AB;
3438 else
3439 rx_path = RF_ABCD; /* don't change path, but still set others */
3440
3441 if (enable) {
3442 rtw8852b_bb_set_plcp_tx(rtwdev);
3443 rtw8852b_bb_cfg_tx_path(rtwdev, path);
3444 rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
3445 rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
3446 }
3447
3448 rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
3449}
3450
3451static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
3452 enum rtw89_phy_idx phy, const u32 reg[],
3453 u32 reg_backup[], u32 reg_num)
3454{
3455 u32 i;
3456
3457 for (i = 0; i < reg_num; i++) {
3458 reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
3459
3460 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3461 "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
3462 reg_backup[i]);
3463 }
3464}
3465
3466static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
3467 enum rtw89_phy_idx phy, const u32 reg[],
3468 u32 reg_backup[], u32 reg_num)
3469
3470{
3471 u32 i;
3472
3473 for (i = 0; i < reg_num; i++) {
3474 rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
3475
3476 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3477 "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
3478 reg_backup[i]);
3479 }
3480}
3481
/* Map a channel number to the dense per-channel alignment-backup index:
 * 2G channels occupy 0..13, then each 5G sub-band follows in steps of
 * two channels.  Unknown channels map to index 0.
 */
static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2 + 14;
	if (channel >= 100 && channel <= 144)
		return (channel - 100) / 2 + 29;
	if (channel >= 149 && channel <= 177)
		return (channel - 149) / 2 + 52;

	return 0;
}
3499
/* Collect the TSSI codeword reports used by the alignment calibration.
 * For each of the two measurement rounds: re-arm the TSSI trigger, start
 * PMAC packet TX at power[j], poll (up to 100 x 30us) for the codeword-
 * report-ready bit, then read the report into tssi_cw_rpt[j] and stop TX.
 * Returns false if a report never became ready.
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	/* NOTE(review): the loop bound reuses RTW8852B_TSSI_PATH_NR (2) as
	 * the number of measurement rounds, not as a path count.
	 */
	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		/* Toggle the trigger to start a fresh TSSI measurement. */
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
			    _tssi_trigger[path], tmp, path);

		/* First round transmits on the path under test; the second
		 * uses RF_PATH_ABCD (keep the path config, set the rest).
		 */
		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* Poll for the codeword-report-ready flag. */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		/* Timed out: stop TX and report failure. */
		if (k >= retry) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3573
/* TSSI alignment calibration ("alimentk"): transmit at two known powers,
 * read back the TSSI codeword reports, derive the three alignment offsets
 * and program them into the ALIM registers.  Results are cached per band
 * bucket and per channel; if a cached per-channel result exists it is
 * simply re-applied and the measurement is skipped.
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	/* BB registers clobbered by the measurement; saved and restored. */
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, 4};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852b_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 start_time, finish_time;
	u32 bb_reg_backup[8] = {0};
	const s16 *power;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s channel=%d path=%d\n", __func__, channel,
		    path);

	/* Fast path: a result for this exact channel was cached earlier —
	 * reload the four ALIM registers and skip the measurement.
	 */
	if (tssi_info->check_backup_aligmk[path][ch_idx]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][3]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "======> %s Reload TSSI Alignment !!!\n", __func__);
		_tssi_alimentk_dump_result(rtwdev, path);
		return;
	}

	/* NOTE(review): ktime_get_ns() returns u64 but start/finish_time
	 * are u32, so the difference wraps for runs near 4.29 s — confirm
	 * this truncation is acceptable here.
	 */
	start_time = ktime_get_ns();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	/* Channel -> alignment-calibration band bucket (result cache key). */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));

	/* Measurement-time averaging configuration for both paths. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* Offset 1: measured codeword delta vs. the expected delta implied
	 * by the power step (0.5 dB units), relative to the default CW.
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	/* Offsets 2 and 3: shift the remaining default CWs by the same
	 * measured difference.
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		/* NOTE(review): the B_P1_* field macros are used to build
		 * the path-A value too — presumably the P0/P1 field layouts
		 * are identical; verify against the register definitions.
		 */
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* Cache the programmed values per band bucket... */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ...and per channel, for the fast reload path above. */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
	rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);

	finish_time = ktime_get_ns();
	tssi_info->tssi_alimk_time += finish_time - start_time;

	/* NOTE(review): the accumulated value is in nanoseconds but the
	 * log label says "ms" — confirm the intended unit.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %d ms\n", __func__,
		    tssi_info->tssi_alimk_time);
}
3749
/* One-time DPK initialization: program the DPD (digital pre-distortion)
 * gain backoff for PHY 0 before the first DPK run.
 */
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
{
	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
}
3754
3755void rtw8852b_rck(struct rtw89_dev *rtwdev)
3756{
3757 u8 path;
3758
3759 for (path = 0; path < RF_PATH_NUM_8852B; path++)
3760 _rck(rtwdev, path);
3761}
3762
/* Run DAC calibration (DACK) on PHY 0.
 *
 * The calibration is bracketed by BTC notifications so the BT-coexistence
 * engine knows a Wi-Fi RF calibration is in progress.
 */
void rtw8852b_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
3771
3772void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3773{
3774 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3775 u32 tx_en;
3776
3777 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3778 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3779 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3780
3781 _iqk_init(rtwdev);
3782 _iqk(rtwdev, phy_idx, false);
3783
3784 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3785 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3786}
3787
/* Run RX DC offset calibration (RX DCK) on the given PHY.
 *
 * Scheduler TX is stopped and RX mode drained before calibration and
 * restored afterwards; BTC is notified around the whole operation.
 */
void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
3802
/* Run digital pre-distortion calibration (DPK) on the given PHY.
 *
 * TX is stopped and RX mode drained before calibration. DPK is forced
 * enabled and reload-from-backup is disabled so a full calibration runs.
 * BTC is notified around the whole operation.
 */
void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
3819
/* Periodic DPK tracking entry point; delegates to the internal tracker. */
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3824
/* Full TSSI (transmit signal strength indicator) calibration for both
 * RF paths of the given PHY.
 *
 * TSSI is disabled first, then each path is configured (RF settings,
 * system/power-control registers, DCK, thermal-meter and DAC gain
 * tables, slope, default alignment). When @hwtx_en is set, the
 * alignment calibration that requires real HW TX is also run, with
 * scheduler TX stopped and TMAC TX paused around it. Finally TSSI is
 * re-enabled and efuse-derived DE values are applied.
 */
void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
	u32 tx_en;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_alignment_default(rtwdev, phy, i, true);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		/* Alignment calibration transmits; quiesce TX around it. */
		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
3861
3862void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3863{
3864 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3865 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3866 u8 channel = chan->channel;
3867 u8 band;
3868 u32 i;
3869
3870 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3871 "======>%s phy=%d channel=%d\n", __func__, phy, channel);
3872
3873 if (channel >= 1 && channel <= 14)
3874 band = TSSI_ALIMK_2G;
3875 else if (channel >= 36 && channel <= 64)
3876 band = TSSI_ALIMK_5GL;
3877 else if (channel >= 100 && channel <= 144)
3878 band = TSSI_ALIMK_5GM;
3879 else if (channel >= 149 && channel <= 177)
3880 band = TSSI_ALIMK_5GH;
3881 else
3882 band = TSSI_ALIMK_2G;
3883
3884 _tssi_disable(rtwdev, phy);
3885
3886 for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
3887 _tssi_rf_setting(rtwdev, phy, i);
3888 _tssi_set_sys(rtwdev, phy, i);
3889 _tssi_set_tmeter_tbl(rtwdev, phy, i);
3890
3891 if (tssi_info->alignment_done[i][band])
3892 _tssi_alimentk_done(rtwdev, phy, i);
3893 else
3894 _tssi_alignment_default(rtwdev, phy, i, true);
3895 }
3896
3897 _tssi_enable(rtwdev, phy);
3898 _tssi_set_efuse_to_de(rtwdev, phy);
3899}
3900
/* Switch TSSI between tracking mode and a fixed default TX AGC.
 *
 * @enable true: run the full TSSI flow (with HW TX) once, but only if
 * TSSI mode is not already active on either path.
 * @enable false (scan end): write the default offset 0xc0 to both
 * paths' TSSI tracking registers, toggle the offset-enable bit 0->1 to
 * latch it, and restore the saved alignment results.
 */
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
		    __func__, channel);

	if (enable) {
		if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
			rtw8852b_tssi(rtwdev, phy, true);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	/* Set default offset, then pulse the enable bit to latch it. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s SCAN_END\n", __func__);
}
3941
3942void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
3943 enum rtw89_phy_idx phy_idx)
3944{
3945 if (scan_start)
3946 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
3947 else
3948 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
3949}
3950
/* Program the bandwidth field of RF register 0x18 on one path.
 *
 * @dav selects which copy of the register is written (RR_CFGCH when
 * true, RR_CFGCH_V1 otherwise). The function reads back the register,
 * rewrites the BW field for @bw, clears a set of control bits and
 * forces BW2, then writes the result back.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		/* 5/10 MHz channels use the 20 MHz RF bandwidth setting. */
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		/* Unsupported width: leave BW field cleared, still write back. */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* Note precedence: RHS is (~bits) & RFREG_MASK, so this clears the
	 * listed control bits AND truncates the value to the RF register
	 * width in one step, before BW2 is forced on.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
3992
3993static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3994 enum rtw89_bandwidth bw)
3995{
3996 _bw_setting(rtwdev, RF_PATH_A, bw, true);
3997 _bw_setting(rtwdev, RF_PATH_B, bw, true);
3998 _bw_setting(rtwdev, RF_PATH_A, bw, false);
3999 _bw_setting(rtwdev, RF_PATH_B, bw, false);
4000}
4001
/* Write a new channel configuration to S0 RF register 0x18 and wait for
 * the synthesizer to settle.
 *
 * The LDO register is backed up and LDO_SEL forced while the write is in
 * flight, then restored. Settling is detected by polling the RR_LPF busy
 * bit (1 us interval, 1 ms budget, atomic context).
 *
 * Returns true on poll timeout, false when the busy bit cleared in time.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 bak;
	u32 tmp;
	int ret;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);

	return !!ret;
}
4021
/* Verify synthesizer lock and apply escalating recovery steps.
 *
 * Each stage re-reads the RR_SYNFB lock bit and, while it is still 0,
 * tries a stronger recovery: (1) reset the SYN MMD divider, (2) rewrite
 * RF 0x18 with the lock trigger asserted, (3) power-cycle the SYN block
 * and rewrite 0x18 again.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		/* Stage 1: pulse the MMD reset. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		/* Stage 2: replay the current 0x18 value with LCK trigger. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* Stage 3: read-back/rewrite of POW and SX (presumably to
		 * refresh latched values - TODO confirm), then power-cycle
		 * the SYN block via RR_POW_SYN and retry 0x18.
		 */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
4069
4070static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
4071{
4072 bool timeout;
4073
4074 timeout = _set_s0_arfc18(rtwdev, val);
4075 if (!timeout)
4076 _lck_check(rtwdev);
4077}
4078
/* Program the channel number and band bits of RF register 0x18 on one
 * path.
 *
 * @dav selects which copy of the register is written (RR_CFGCH when
 * true, RR_CFGCH_V1 otherwise). For path A with @dav set, the value goes
 * through _set_ch() so synthesizer settling and lock recovery run;
 * otherwise it is a plain register write. RR_LCKST_BIN is pulsed 0->1
 * afterwards.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	/* Band bits stay 0 for 2 GHz; set both band fields for 5 GHz. */
	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* Note precedence: RHS is (~bits) & RFREG_MASK, so this clears the
	 * listed bits and truncates to RF register width, then BW2 is
	 * forced on.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4114
4115static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
4116{
4117 _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
4118 _ch_setting(rtwdev, RF_PATH_B, central_ch, true);
4119 _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
4120 _ch_setting(rtwdev, RF_PATH_B, central_ch, false);
4121}
4122
4123static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4124 enum rtw89_rf_path path)
4125{
4126 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4127 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4128
4129 if (bw == RTW89_CHANNEL_WIDTH_20)
4130 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4131 else if (bw == RTW89_CHANNEL_WIDTH_40)
4132 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4133 else if (bw == RTW89_CHANNEL_WIDTH_80)
4134 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4135 else
4136 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4137
4138 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
4139 rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4140
4141 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4142}
4143
4144static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4145 enum rtw89_bandwidth bw)
4146{
4147 u8 kpath, path;
4148
4149 kpath = _kpath(rtwdev, phy);
4150
4151 for (path = 0; path < RF_PATH_NUM_8852B; path++) {
4152 if (!(kpath & BIT(path)))
4153 continue;
4154
4155 _set_rxbb_bw(rtwdev, bw, path);
4156 }
4157}
4158
/* Program channel, RF bandwidth and RX baseband bandwidth, in that
 * order. @band is currently unused here; band selection is derived from
 * the channel number inside _ch_setting().
 */
static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
4167
/* Public entry point: configure the RF front end for the given channel
 * descriptor (center channel, band, bandwidth) on the given PHY.
 */
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
			    chan->band_width);
}