1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4#include <linux/delay.h>
5#include <linux/iopoll.h>
6#include "ice_common.h"
7#include "ice_ptp_hw.h"
8#include "ice_ptp_consts.h"
9#include "ice_cgu_regs.h"
10
11static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
12 DPLL_PIN_FREQUENCY_1PPS,
13 DPLL_PIN_FREQUENCY_10MHZ,
14};
15
16static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
17 DPLL_PIN_FREQUENCY_1PPS,
18};
19
20static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
21 DPLL_PIN_FREQUENCY_10MHZ,
22};
23
24static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
25 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
26 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
27 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
28 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
29 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
30 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
31 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
32 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
33 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
34 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
35 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
36 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
37};
38
39static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
40 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
41 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
42 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
43 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
44 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
45 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
46 { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
47 { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
48 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
49 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
50 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
51 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
52 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
53 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
54};
55
56static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
57 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
58 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
59 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
60 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
61 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
62 { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
63 { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
64 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
65 { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
66 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
67};
68
69static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
70 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
71 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
72 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
73 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
74 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
75 { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
76 { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
77 { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
78 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
79 { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
80 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
81};
82
83static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
84 { "NONE", SI_REF0P, 0, 0 },
85 { "NONE", SI_REF0N, 0, 0 },
86 { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
87 { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
88 { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
89 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
90 { "NONE", SI_REF2N, 0, 0 },
91 { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
92 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
93 { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
94 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
95};
96
97static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
98 { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
99 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
100 { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
101 { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
102 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
103 { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
104 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
105};
106
107static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
108 { "NONE", ZL_REF0P, 0, 0 },
109 { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
110 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
111 { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
112 { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
113 { "NONE", ZL_REF2P, 0, 0 },
114 { "NONE", ZL_REF2N, 0, 0 },
115 { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
116 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
117 { "NONE", ZL_REF3N, 0, 0 },
118 { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
119 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
120 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
121};
122
123static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
124 { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
125 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
126 { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
127 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
128 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
129 { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
130 { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
131 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
132 { "NONE", ZL_OUT5, 0, 0 },
133};
134
135/* Low level functions for interacting with and managing the device clock used
136 * for the Precision Time Protocol.
137 *
138 * The ice hardware represents the current time using three registers:
139 *
140 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
141 * +---------------+ +---------------+ +---------------+
142 * | 32 bits | | 32 bits | | 32 bits |
143 * +---------------+ +---------------+ +---------------+
144 *
145 * The registers are incremented every clock tick using a 40bit increment
146 * value defined over two registers:
147 *
148 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
149 * +---------------+ +---------------+
150 * | 8 bit s | | 32 bits |
151 * +---------------+ +---------------+
152 *
153 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
154 * registers every clock source tick. Depending on the specific device
155 * configuration, the clock source frequency could be one of a number of
156 * values.
157 *
158 * For E810 devices, the increment frequency is 812.5 MHz
159 *
160 * For E822 devices the clock can be derived from different sources, and the
161 * increment has an effective frequency of one of the following:
162 * - 823.4375 MHz
163 * - 783.36 MHz
164 * - 796.875 MHz
165 * - 816 MHz
166 * - 830.078125 MHz
167 * - 783.36 MHz
168 *
169 * The hardware captures timestamps in the PHY for incoming packets, and for
170 * outgoing packets on request. To support this, the PHY maintains a timer
171 * that matches the lower 64 bits of the global source timer.
172 *
173 * In order to ensure that the PHY timers and the source timer are equivalent,
174 * shadow registers are used to prepare the desired initial values. A special
175 * sync command is issued to trigger copying from the shadow registers into
176 * the appropriate source and PHY registers simultaneously.
177 *
178 * The driver supports devices which have different PHYs with subtly different
179 * mechanisms to program and control the timers. We divide the devices into
180 * families named after the first major device, E810 and similar devices, and
181 * E822 and similar devices.
182 *
183 * - E822 based devices have additional support for fine grained Vernier
184 * calibration which requires significant setup
185 * - The layout of timestamp data in the PHY register blocks is different
186 * - The way timer synchronization commands are issued is different.
187 *
188 * To support this, very low level functions have an e810 or e822 suffix
189 * indicating what type of device they work on. Higher level abstractions for
190 * tasks that can be done on both devices do not have the suffix and will
191 * correctly look up the appropriate low level function when running.
192 *
193 * Functions which only make sense on a single device family may not have
194 * a suitable generic implementation.
195 */
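/* Illustrative sketch (not part of the driver logic): the 40-bit increment
 * value above is a fixed-point nanosecond quantity, with whole nanoseconds in
 * the upper 8 bits (GLTSYN_INCVAL_H) and a 32-bit sub-nanosecond fraction in
 * the lower bits (GLTSYN_INCVAL_L), assuming the usual 2^-32 ns units.
 * Splitting and reassembling it are plain shifts and masks:
 *
 *	u32 lo = lower_32_bits(incval);		// GLTSYN_INCVAL_L
 *	u32 hi = (u32)(incval >> 32) & 0xFF;	// GLTSYN_INCVAL_H (8 bits)
 *	u64 back = ((u64)hi << 32) | lo;	// as ice_ptp_read_src_incval()
 *
 * Worked example under that assumption: the E810 increment frequency of
 * 812.5 MHz gives a tick period of 1 / 0.8125 GHz ~= 1.2308 ns, so the
 * nominal increment value is roughly 2^32 / 0.8125 ~= 0x13B13B13B.
 */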
196
197/**
198 * ice_get_ptp_src_clock_index - determine source clock index
199 * @hw: pointer to HW struct
200 *
201 * Determine the source clock index currently in use, based on device
202 * capabilities reported during initialization.
203 */
204u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
205{
206 return hw->func_caps.ts_func_info.tmr_index_assoc;
207}
208
209/**
210 * ice_ptp_read_src_incval - Read source timer increment value
211 * @hw: pointer to HW struct
212 *
213 * Read the increment value of the source timer and return it.
214 */
215static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
216{
217 u32 lo, hi;
218 u8 tmr_idx;
219
220 tmr_idx = ice_get_ptp_src_clock_index(hw);
221
222 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
223 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
224
225 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
226}
227
228/**
229 * ice_read_cgu_reg_e82x - Read a CGU register
230 * @hw: pointer to the HW struct
231 * @addr: Register address to read
232 * @val: storage for register value read
233 *
234 * Read the contents of a register of the Clock Generation Unit. Only
235 * applicable to E822 devices.
236 *
237 * Return: 0 on success, other error codes when failed to read from CGU
238 */
239static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
240{
241 struct ice_sbq_msg_input cgu_msg = {
242 .opcode = ice_sbq_msg_rd,
243 .dest_dev = cgu,
244 .msg_addr_low = addr
245 };
246 int err;
247
248 err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
249 if (err) {
250 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
251 addr, err);
252 return err;
253 }
254
255 *val = cgu_msg.data;
256
257 return 0;
258}
259
260/**
261 * ice_write_cgu_reg_e82x - Write a CGU register
262 * @hw: pointer to the HW struct
263 * @addr: Register address to write
264 * @val: value to write into the register
265 *
266 * Write the specified value to a register of the Clock Generation Unit. Only
267 * applicable to E822 devices.
268 *
269 * Return: 0 on success, other error codes when failed to write to CGU
270 */
271static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
272{
273 struct ice_sbq_msg_input cgu_msg = {
274 .opcode = ice_sbq_msg_wr,
275 .dest_dev = cgu,
276 .msg_addr_low = addr,
277 .data = val
278 };
279 int err;
280
281 err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
282 if (err) {
283 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
284 addr, err);
285 return err;
286 }
287
288 return err;
289}
290
291/**
292 * ice_clk_freq_str - Convert time_ref_freq to string
293 * @clk_freq: Clock frequency
294 *
295 * Return: specified TIME_REF clock frequency converted to a string
296 */
297static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
298{
299 switch (clk_freq) {
300 case ICE_TIME_REF_FREQ_25_000:
301 return "25 MHz";
302 case ICE_TIME_REF_FREQ_122_880:
303 return "122.88 MHz";
304 case ICE_TIME_REF_FREQ_125_000:
305 return "125 MHz";
306 case ICE_TIME_REF_FREQ_153_600:
307 return "153.6 MHz";
308 case ICE_TIME_REF_FREQ_156_250:
309 return "156.25 MHz";
310 case ICE_TIME_REF_FREQ_245_760:
311 return "245.76 MHz";
312 default:
313 return "Unknown";
314 }
315}
316
317/**
318 * ice_clk_src_str - Convert time_ref_src to string
319 * @clk_src: Clock source
320 *
321 * Return: specified clock source converted to its string name
322 */
323static const char *ice_clk_src_str(enum ice_clk_src clk_src)
324{
325 switch (clk_src) {
326 case ICE_CLK_SRC_TCXO:
327 return "TCXO";
328 case ICE_CLK_SRC_TIME_REF:
329 return "TIME_REF";
330 default:
331 return "Unknown";
332 }
333}
334
335/**
336 * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
337 * @hw: pointer to the HW struct
338 * @clk_freq: Clock frequency to program
339 * @clk_src: Clock source to select (TIME_REF, or TCXO)
340 *
341 * Configure the Clock Generation Unit with the desired clock frequency and
342 * time reference, enabling the PLL which drives the PTP hardware clock.
343 *
344 * Return:
345 * * %0 - success
346 * * %-EINVAL - input parameters are incorrect
347 * * %-EBUSY - failed to lock TS PLL
348 * * %other - CGU read/write failure
349 */
350static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
351 enum ice_time_ref_freq clk_freq,
352 enum ice_clk_src clk_src)
353{
354 union tspll_ro_bwm_lf bwm_lf;
355 union nac_cgu_dword19 dw19;
356 union nac_cgu_dword22 dw22;
357 union nac_cgu_dword24 dw24;
358 union nac_cgu_dword9 dw9;
359 int err;
360
361 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
362 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
363 clk_freq);
364 return -EINVAL;
365 }
366
367 if (clk_src >= NUM_ICE_CLK_SRC) {
368 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
369 clk_src);
370 return -EINVAL;
371 }
372
373 if (clk_src == ICE_CLK_SRC_TCXO &&
374 clk_freq != ICE_TIME_REF_FREQ_25_000) {
375 dev_warn(ice_hw_to_dev(hw),
376 "TCXO only supports 25 MHz frequency\n");
377 return -EINVAL;
378 }
379
380 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
381 if (err)
382 return err;
383
384 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
385 if (err)
386 return err;
387
388 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
389 if (err)
390 return err;
391
392 /* Log the current clock configuration */
393 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
394 dw24.ts_pll_enable ? "enabled" : "disabled",
395 ice_clk_src_str(dw24.time_ref_sel),
396 ice_clk_freq_str(dw9.time_ref_freq_sel),
397 bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
398
399 /* Disable the PLL before changing the clock source or frequency */
400 if (dw24.ts_pll_enable) {
401 dw24.ts_pll_enable = 0;
402
403 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
404 if (err)
405 return err;
406 }
407
408 /* Set the frequency */
409 dw9.time_ref_freq_sel = clk_freq;
410 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
411 if (err)
412 return err;
413
414 /* Configure the TS PLL feedback divisor */
415 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
416 if (err)
417 return err;
418
419 dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
420 dw19.tspll_ndivratio = 1;
421
422 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
423 if (err)
424 return err;
425
426 /* Configure the TS PLL post divisor */
427 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
428 if (err)
429 return err;
430
431 dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
432 dw22.time1588clk_sel_div2 = 0;
433
434 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
435 if (err)
436 return err;
437
438 /* Configure the TS PLL pre divisor and clock source */
439 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
440 if (err)
441 return err;
442
443 dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
444 dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
445 dw24.time_ref_sel = clk_src;
446
447 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
448 if (err)
449 return err;
450
451 /* Finally, enable the PLL */
452 dw24.ts_pll_enable = 1;
453
454 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
455 if (err)
456 return err;
457
458 /* Wait to verify if the PLL locks */
459 usleep_range(1000, 5000);
460
461 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
462 if (err)
463 return err;
464
465 if (!bwm_lf.plllock_true_lock_cri) {
466 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
467 return -EBUSY;
468 }
469
470 /* Log the current clock configuration */
471 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
472 dw24.ts_pll_enable ? "enabled" : "disabled",
473 ice_clk_src_str(dw24.time_ref_sel),
474 ice_clk_freq_str(dw9.time_ref_freq_sel),
475 bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
476
477 return 0;
478}
479
480/**
481 * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
482 * @hw: pointer to the HW struct
483 * @clk_freq: Clock frequency to program
484 * @clk_src: Clock source to select (TIME_REF, or TCXO)
485 *
486 * Configure the Clock Generation Unit with the desired clock frequency and
487 * time reference, enabling the PLL which drives the PTP hardware clock.
488 *
489 * Return:
490 * * %0 - success
491 * * %-EINVAL - input parameters are incorrect
492 * * %-EBUSY - failed to lock TS PLL
493 * * %other - CGU read/write failure
494 */
495static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
496 enum ice_time_ref_freq clk_freq,
497 enum ice_clk_src clk_src)
498{
499 union tspll_ro_lock_e825c ro_lock;
500 union nac_cgu_dword16_e825c dw16;
501 union nac_cgu_dword23_e825c dw23;
502 union nac_cgu_dword19 dw19;
503 union nac_cgu_dword22 dw22;
504 union nac_cgu_dword24 dw24;
505 union nac_cgu_dword9 dw9;
506 int err;
507
508 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
509 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
510 clk_freq);
511 return -EINVAL;
512 }
513
514 if (clk_src >= NUM_ICE_CLK_SRC) {
515 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
516 clk_src);
517 return -EINVAL;
518 }
519
520 if (clk_src == ICE_CLK_SRC_TCXO &&
521 clk_freq != ICE_TIME_REF_FREQ_156_250) {
522 dev_warn(ice_hw_to_dev(hw),
523 "TCXO only supports 156.25 MHz frequency\n");
524 return -EINVAL;
525 }
526
527 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
528 if (err)
529 return err;
530
531 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
532 if (err)
533 return err;
534
535 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
536 if (err)
537 return err;
538
539 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
540 if (err)
541 return err;
542
543 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
544 if (err)
545 return err;
546
547 /* Log the current clock configuration */
548 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
549 dw24.ts_pll_enable ? "enabled" : "disabled",
550 ice_clk_src_str(dw23.time_ref_sel),
551 ice_clk_freq_str(dw9.time_ref_freq_sel),
552 ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
553
554 /* Disable the PLL before changing the clock source or frequency */
555 if (dw23.ts_pll_enable) {
556 dw23.ts_pll_enable = 0;
557
558 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
559 dw23.val);
560 if (err)
561 return err;
562 }
563
564 /* Set the frequency */
565 dw9.time_ref_freq_sel = clk_freq;
566
567 /* Enable the correct receiver */
568 if (clk_src == ICE_CLK_SRC_TCXO) {
569 dw9.time_ref_en = 0;
570 dw9.clk_eref0_en = 1;
571 } else {
572 dw9.time_ref_en = 1;
573 dw9.clk_eref0_en = 0;
574 }
575 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
576 if (err)
577 return err;
578
579 /* Choose the reference frequency */
580 dw16.tspll_ck_refclkfreq =
581 e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
582 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
583 if (err)
584 return err;
585
586 /* Configure the TS PLL feedback divisor */
587 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
588 if (err)
589 return err;
590
591 dw19.tspll_fbdiv_intgr =
592 e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
593 dw19.tspll_ndivratio =
594 e825c_cgu_params[clk_freq].tspll_ndivratio;
595
596 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
597 if (err)
598 return err;
599
600 /* Configure the TS PLL post divisor */
601 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
602 if (err)
603 return err;
604
605 /* These two are constant for E825C */
606 dw22.time1588clk_div = 5;
607 dw22.time1588clk_sel_div2 = 0;
608
609 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
610 if (err)
611 return err;
612
613 /* Configure the TS PLL pre divisor and clock source */
614 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
615 if (err)
616 return err;
617
618 dw23.ref1588_ck_div =
619 e825c_cgu_params[clk_freq].ref1588_ck_div;
620 dw23.time_ref_sel = clk_src;
621
622 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
623 if (err)
624 return err;
625
626 dw24.tspll_fbdiv_frac =
627 e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
628
629 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
630 if (err)
631 return err;
632
633 /* Finally, enable the PLL */
634 dw23.ts_pll_enable = 1;
635
636 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
637 if (err)
638 return err;
639
640 /* Wait to verify if the PLL locks */
641 usleep_range(1000, 5000);
642
643 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
644 if (err)
645 return err;
646
647 if (!ro_lock.plllock_true_lock_cri) {
648 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
649 return -EBUSY;
650 }
651
652 /* Log the current clock configuration */
653 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
654 dw24.ts_pll_enable ? "enabled" : "disabled",
655 ice_clk_src_str(dw23.time_ref_sel),
656 ice_clk_freq_str(dw9.time_ref_freq_sel),
657 ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
658
659 return 0;
660}
661
662#define ICE_ONE_PPS_OUT_AMP_MAX 3
663
664/**
665 * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
666 * @hw: pointer to the HW struct
667 * @enable: true to enable 1PPS output, false to disable it
668 *
669 * Return: 0 on success, other negative error code when CGU read/write failed
670 */
671int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
672{
673 union nac_cgu_dword9 dw9;
674 int err;
675
676 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
677 if (err)
678 return err;
679
680 dw9.one_pps_out_en = enable;
681 dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
682 return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
683}
684
685/**
686 * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
687 * @hw: pointer to the HW struct
688 *
689 * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
690 * losing TS PLL lock, but always show current state.
691 *
692 * Return: 0 on success, other error codes when failed to read/write CGU
693 */
694static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
695{
696 union tspll_cntr_bist_settings cntr_bist;
697 int err;
698
699 err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
700 &cntr_bist.val);
701 if (err)
702 return err;
703
704 /* Disable sticky lock detection so lock err reported is accurate */
705 cntr_bist.i_plllock_sel_0 = 0;
706 cntr_bist.i_plllock_sel_1 = 0;
707
708 return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
709 cntr_bist.val);
710}
711
712/**
713 * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
714 * @hw: pointer to the HW struct
715 *
716 * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
717 * losing TS PLL lock, but always show current state.
718 *
719 * Return: 0 on success, other error codes when failed to read/write CGU
720 */
721static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
722{
723 union tspll_bw_tdc_e825c bw_tdc;
724 int err;
725
726 err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
727 if (err)
728 return err;
729
730 bw_tdc.i_plllock_sel_1_0 = 0;
731
732 return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
733}
734
735/**
736 * ice_init_cgu_e82x - Initialize CGU with settings from firmware
737 * @hw: pointer to the HW structure
738 *
739 * Initialize the Clock Generation Unit of the E822 device.
740 *
741 * Return: 0 on success, other error codes when failed to read/write/cfg CGU
742 */
743static int ice_init_cgu_e82x(struct ice_hw *hw)
744{
745 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
746 int err;
747
748 /* Disable sticky lock detection so lock err reported is accurate */
749 if (ice_is_e825c(hw))
750 err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
751 else
752 err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
753 if (err)
754 return err;
755
756 /* Configure the CGU PLL using the parameters from the function
757 * capabilities.
758 */
759 if (ice_is_e825c(hw))
760 err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
761 (enum ice_clk_src)ts_info->clk_src);
762 else
763 err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
764 (enum ice_clk_src)ts_info->clk_src);
765
766 return err;
767}
768
769/**
770 * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
771 * @hw: pointer to HW struct
772 * @cmd: Timer command
773 *
774 * Return: the source timer command register value for the given PTP timer
775 * command.
776 */
777static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw,
778 enum ice_ptp_tmr_cmd cmd)
779{
780 u32 cmd_val, tmr_idx;
781
782 switch (cmd) {
783 case ICE_PTP_INIT_TIME:
784 cmd_val = GLTSYN_CMD_INIT_TIME;
785 break;
786 case ICE_PTP_INIT_INCVAL:
787 cmd_val = GLTSYN_CMD_INIT_INCVAL;
788 break;
789 case ICE_PTP_ADJ_TIME:
790 cmd_val = GLTSYN_CMD_ADJ_TIME;
791 break;
792 case ICE_PTP_ADJ_TIME_AT_TIME:
793 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
794 break;
795 case ICE_PTP_NOP:
796 case ICE_PTP_READ_TIME:
797 cmd_val = GLTSYN_CMD_READ_TIME;
798 break;
799 default:
800 dev_warn(ice_hw_to_dev(hw),
801 "Ignoring unrecognized timer command %u\n", cmd);
802 cmd_val = 0;
803 }
804
805 tmr_idx = ice_get_ptp_src_clock_index(hw);
806
807 return tmr_idx << SEL_CPK_SRC | cmd_val;
808}
809
810/**
811 * ice_ptp_tmr_cmd_to_port_reg - Convert to port timer command value
812 * @hw: pointer to HW struct
813 * @cmd: Timer command
814 *
815 * Note that some hardware families use a different command register value for
816 * the PHY ports, while other hardware families use the same register values
817 * as the source timer.
818 *
819 * Return: the PHY port timer command register value for the given PTP timer
820 * command.
821 */
822static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
823 enum ice_ptp_tmr_cmd cmd)
824{
825 u32 cmd_val, tmr_idx;
826
827 /* Certain hardware families share the same register values for the
828 * port register and source timer register.
829 */
830 switch (ice_get_phy_model(hw)) {
831 case ICE_PHY_E810:
832 return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
833 default:
834 break;
835 }
836
837 switch (cmd) {
838 case ICE_PTP_INIT_TIME:
839 cmd_val = PHY_CMD_INIT_TIME;
840 break;
841 case ICE_PTP_INIT_INCVAL:
842 cmd_val = PHY_CMD_INIT_INCVAL;
843 break;
844 case ICE_PTP_ADJ_TIME:
845 cmd_val = PHY_CMD_ADJ_TIME;
846 break;
847 case ICE_PTP_ADJ_TIME_AT_TIME:
848 cmd_val = PHY_CMD_ADJ_TIME_AT_TIME;
849 break;
850 case ICE_PTP_READ_TIME:
851 cmd_val = PHY_CMD_READ_TIME;
852 break;
853 case ICE_PTP_NOP:
854 cmd_val = 0;
855 break;
856 default:
857 dev_warn(ice_hw_to_dev(hw),
858 "Ignoring unrecognized timer command %u\n", cmd);
859 cmd_val = 0;
860 }
861
862 tmr_idx = ice_get_ptp_src_clock_index(hw);
863
864 return tmr_idx << SEL_PHY_SRC | cmd_val;
865}
866
867/**
868 * ice_ptp_src_cmd - Prepare source timer for a timer command
869 * @hw: pointer to HW structure
870 * @cmd: Timer command
871 *
872 * Prepare the source timer for an upcoming timer sync command.
873 */
874void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
875{
876 u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
877
878 wr32(hw, GLTSYN_CMD, cmd_val);
879}
880
881/**
882 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
883 * @hw: pointer to HW struct
884 *
885 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
886 * write immediately. This triggers the hardware to begin executing all of the
887 * source and PHY timer commands synchronously.
888 */
889static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
890{
891 struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
892
893 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
894 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
895 ice_flush(hw);
896}
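/* Minimal usage sketch (illustrative only): timer commands are a two-step
 * sequence. The source timer and the PHY ports are first prepared with the
 * same command, and only the final GLTSYN_CMD_SYNC write kicks everything off
 * so that the copies happen synchronously, e.g.:
 *
 *	ice_ptp_src_cmd(hw, ICE_PTP_INIT_TIME);
 *	// ... prepare each PHY port for the same command ...
 *	ice_ptp_exec_tmr_cmd(hw);
 *
 * The higher level helpers later in this file follow this prepare-then-execute
 * pattern.
 */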
897
898/* 56G PHY device functions
899 *
900 * The following functions operate on devices with the ETH 56G PHY.
901 */
902
903/**
904 * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
905 * @hw: pointer to the HW struct
906 * @port: destination port
907 *
908 * Return: destination sideband queue PHY device.
909 */
910static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
911 u8 port)
912{
913 /* On a single complex E825, PHY 0 is always destination device phy_0
914 * and PHY 1 is phy_0_peer.
915 */
916 if (port >= hw->ptp.ports_per_phy)
917 return eth56g_phy_1;
918 else
919 return eth56g_phy_0;
920}
921
922/**
923 * ice_write_phy_eth56g - Write a PHY port register
924 * @hw: pointer to the HW struct
925 * @port: destination port
926 * @addr: PHY register address
927 * @val: Value to write
928 *
929 * Return: 0 on success, other error codes when failed to write to PHY
930 */
931static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
932{
933 struct ice_sbq_msg_input msg = {
934 .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
935 .opcode = ice_sbq_msg_wr,
936 .msg_addr_low = lower_16_bits(addr),
937 .msg_addr_high = upper_16_bits(addr),
938 .data = val
939 };
940 int err;
941
942 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
943 if (err)
944 ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
945 err);
946
947 return err;
948}
949
950/**
951 * ice_read_phy_eth56g - Read a PHY port register
952 * @hw: pointer to the HW struct
953 * @port: destination port
954 * @addr: PHY register address
955 * @val: Value to read
956 *
957 * Return: 0 on success, other error codes when failed to read from PHY
958 */
959static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
960{
961 struct ice_sbq_msg_input msg = {
962 .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
963 .opcode = ice_sbq_msg_rd,
964 .msg_addr_low = lower_16_bits(addr),
965 .msg_addr_high = upper_16_bits(addr)
966 };
967 int err;
968
969 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
970 if (err)
971 ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
972 err);
973 else
974 *val = msg.data;
975
976 return err;
977}
978
979/**
980 * ice_phy_res_address_eth56g - Calculate a PHY port register address
981 * @hw: pointer to the HW struct
982 * @lane: Lane number to be written
983 * @res_type: resource type (register/memory)
984 * @offset: Offset from PHY port register base
985 * @addr: The result address
986 *
987 * Return:
988 * * %0 - success
989 * * %EINVAL - invalid port number or resource type
990 */
991static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
992 enum eth56g_res_type res_type,
993 u32 offset,
994 u32 *addr)
995{
996 if (res_type >= NUM_ETH56G_PHY_RES)
997 return -EINVAL;
998
999 /* Lanes 4..7 are in fact 0..3 on a second PHY */
1000 lane %= hw->ptp.ports_per_phy;
1001 *addr = eth56g_phy_res[res_type].base[0] +
1002 lane * eth56g_phy_res[res_type].step + offset;
1003
1004 return 0;
1005}
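/* Worked example (illustrative, with assumed numbers): on a part with 4 ports
 * per PHY, lane 5 reduces to lane 1, so for a resource with base B and step S
 * the register address becomes
 *
 *	addr = B + 1 * S + offset
 *
 * while the sideband destination (eth56g_phy_0 vs eth56g_phy_1) is chosen
 * separately from the original port number by ice_ptp_get_dest_dev_e825().
 */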
1006
1007/**
1008 * ice_write_port_eth56g - Write a PHY port register
1009 * @hw: pointer to the HW struct
1010 * @offset: PHY register offset
1011 * @port: Port number
1012 * @val: Value to write
1013 * @res_type: resource type (register/memory)
1014 *
1015 * Return:
1016 * * %0 - success
1017 * * %EINVAL - invalid port number or resource type
1018 * * %other - failed to write to PHY
1019 */
1020static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
1021 u32 val, enum eth56g_res_type res_type)
1022{
1023 u32 addr;
1024 int err;
1025
1026 if (port >= hw->ptp.num_lports)
1027 return -EINVAL;
1028
1029 err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
1030 if (err)
1031 return err;
1032
1033 return ice_write_phy_eth56g(hw, port, addr, val);
1034}
1035
1036/**
1037 * ice_read_port_eth56g - Read a PHY port register
1038 * @hw: pointer to the HW struct
1039 * @offset: PHY register offset
1040 * @port: Port number
1041 * @val: Value to read
1042 * @res_type: resource type (register/memory)
1043 *
1044 * Return:
1045 * * %0 - success
1046 * * %EINVAL - invalid port number or resource type
1047 * * %other - failed to read from PHY
1048 */
1049static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
1050 u32 *val, enum eth56g_res_type res_type)
1051{
1052 u32 addr;
1053 int err;
1054
1055 if (port >= hw->ptp.num_lports)
1056 return -EINVAL;
1057
1058 err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
1059 if (err)
1060 return err;
1061
1062 return ice_read_phy_eth56g(hw, port, addr, val);
1063}
1064
1065/**
1066 * ice_write_ptp_reg_eth56g - Write a PHY port register
1067 * @hw: pointer to the HW struct
1068 * @port: Port number to be written
1069 * @offset: Offset from PHY port register base
1070 * @val: Value to write
1071 *
1072 * Return:
1073 * * %0 - success
1074 * * %EINVAL - invalid port number or resource type
1075 * * %other - failed to write to PHY
1076 */
1077static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1078 u32 val)
1079{
1080 return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
1081}
1082
1083/**
1084 * ice_write_mac_reg_eth56g - Write a MAC PHY port register
1086 * @hw: pointer to the HW struct
1087 * @port: Port number to be written
1088 * @offset: Offset from PHY port register base
1089 * @val: Value to write
1090 *
1091 * Return:
1092 * * %0 - success
1093 * * %EINVAL - invalid port number or resource type
1094 * * %other - failed to write to PHY
1095 */
1096static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
1097 u32 val)
1098{
1099 return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
1100}
1101
1102/**
1103 * ice_write_xpcs_reg_eth56g - Write a PHY port register
1104 * @hw: pointer to the HW struct
1105 * @port: Port number to be written
1106 * @offset: Offset from PHY port register base
1107 * @val: Value to write
1108 *
1109 * Return:
1110 * * %0 - success
1111 * * %EINVAL - invalid port number or resource type
1112 * * %other - failed to write to PHY
1113 */
1114static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
1115 u32 val)
1116{
1117 return ice_write_port_eth56g(hw, port, offset, val,
1118 ETH56G_PHY_REG_XPCS);
1119}
1120
1121/**
1122 * ice_read_ptp_reg_eth56g - Read a PHY port register
1123 * @hw: pointer to the HW struct
1124 * @port: Port number to be read
1125 * @offset: Offset from PHY port register base
1126 * @val: Pointer to the value to read (out param)
1127 *
1128 * Return:
1129 * * %0 - success
1130 * * %EINVAL - invalid port number or resource type
1131 * * %other - failed to read from PHY
1132 */
1133static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1134 u32 *val)
1135{
1136 return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
1137}
1138
1139/**
1140 * ice_read_mac_reg_eth56g - Read a PHY port register
1141 * @hw: pointer to the HW struct
1142 * @port: Port number to be read
1143 * @offset: Offset from PHY port register base
1144 * @val: Pointer to the value to read (out param)
1145 *
1146 * Return:
1147 * * %0 - success
1148 * * %EINVAL - invalid port number or resource type
1149 * * %other - failed to read from PHY
1150 */
1151static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1152 u32 *val)
1153{
1154 return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
1155}
1156
1157/**
1158 * ice_read_gpcs_reg_eth56g - Read a PHY port register
1159 * @hw: pointer to the HW struct
1160 * @port: Port number to be read
1161 * @offset: Offset from PHY port register base
1162 * @val: Pointer to the value to read (out param)
1163 *
1164 * Return:
1165 * * %0 - success
1166 * * %EINVAL - invalid port number or resource type
1167 * * %other - failed to read from PHY
1168 */
1169static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1170 u32 *val)
1171{
1172 return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS);
1173}
1174
1175/**
1176 * ice_read_port_mem_eth56g - Read a PHY port memory location
1177 * @hw: pointer to the HW struct
1178 * @port: Port number to be read
1179 * @offset: Offset from PHY port register base
1180 * @val: Pointer to the value to read (out param)
1181 *
1182 * Return:
1183 * * %0 - success
1184 * * %EINVAL - invalid port number or resource type
1185 * * %other - failed to read from PHY
1186 */
1187static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1188 u32 *val)
1189{
1190 return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
1191}
1192
1193/**
1194 * ice_write_port_mem_eth56g - Write a PHY port memory location
1195 * @hw: pointer to the HW struct
1196 * @port: Port number to be written
1197 * @offset: Offset from PHY port register base
1198 * @val: Value to write
1199 *
1200 * Return:
1201 * * %0 - success
1202 * * %EINVAL - invalid port number or resource type
1203 * * %other - failed to write to PHY
1204 */
1205static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
1206 u32 val)
1207{
1208 return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
1209}
1210
1211/**
1212 * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
1213 * @hw: pointer to the HW struct
1214 * @offset: PHY register offset
1215 * @port: Port number
1216 * @val: Value to write
1217 *
1218 * Return:
1219 * * %0 - success
1220 * * %EIO - invalid port number or resource type
1221 * * %other - failed to write to PHY
1222 */
1223static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
1224 u32 offset, u32 val)
1225{
1226 u32 addr;
1227
1228 if (port >= hw->ptp.num_lports)
1229 return -EIO;
1230
1231 addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
1232
1233 return ice_write_phy_eth56g(hw, port, addr, val);
1234}
1235
1236/**
1237 * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
1238 * @hw: pointer to the HW struct
1239 * @offset: PHY register offset
1240 * @port: Port number
1241 * @val: Value to read
1242 *
1243 * Return:
1244 * * %0 - success
1245 * * %EIO - invalid port number or resource type
1246 * * %other - failed to read from PHY
1247 */
1248static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
1249 u32 offset, u32 *val)
1250{
1251 u32 addr;
1252
1253 if (port >= hw->ptp.num_lports)
1254 return -EIO;
1255
1256 addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
1257
1258 return ice_read_phy_eth56g(hw, port, addr, val);
1259}
1260
1261/**
1262 * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
1263 * @low_addr: the low address to check
1264 * @high_addr: on return, contains the high address of the 64bit register
1265 *
1266 * Write the appropriate high register offset to use.
1267 *
1268 * Return: true if the provided low address is one of the known 64bit PHY values
1269 * represented as two 32bit registers, false otherwise.
1270 */
1271static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
1272{
1273 switch (low_addr) {
1274 case PHY_REG_TX_TIMER_INC_PRE_L:
1275 *high_addr = PHY_REG_TX_TIMER_INC_PRE_U;
1276 return true;
1277 case PHY_REG_RX_TIMER_INC_PRE_L:
1278 *high_addr = PHY_REG_RX_TIMER_INC_PRE_U;
1279 return true;
1280 case PHY_REG_TX_CAPTURE_L:
1281 *high_addr = PHY_REG_TX_CAPTURE_U;
1282 return true;
1283 case PHY_REG_RX_CAPTURE_L:
1284 *high_addr = PHY_REG_RX_CAPTURE_U;
1285 return true;
1286 case PHY_REG_TOTAL_TX_OFFSET_L:
1287 *high_addr = PHY_REG_TOTAL_TX_OFFSET_U;
1288 return true;
1289 case PHY_REG_TOTAL_RX_OFFSET_L:
1290 *high_addr = PHY_REG_TOTAL_RX_OFFSET_U;
1291 return true;
1292 case PHY_REG_TX_MEMORY_STATUS_L:
1293 *high_addr = PHY_REG_TX_MEMORY_STATUS_U;
1294 return true;
1295 default:
1296 return false;
1297 }
1298}
1299
1300/**
1301 * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
1302 * @low_addr: the low address to check
1303 * @high_addr: on return, contains the high address of the 40bit value
1304 *
1305 * Write the appropriate high register offset to use.
1306 *
1307 * Return: true if the provided low address is one of the known 40bit PHY
1308 * values split into two registers with the lower 8 bits in the low register and
1309 * the upper 32 bits in the high register, false otherwise.
1310 */
1311static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
1312{
1313 switch (low_addr) {
1314 case PHY_REG_TIMETUS_L:
1315 *high_addr = PHY_REG_TIMETUS_U;
1316 return true;
1317 case PHY_PCS_REF_TUS_L:
1318 *high_addr = PHY_PCS_REF_TUS_U;
1319 return true;
1320 case PHY_PCS_REF_INC_L:
1321 *high_addr = PHY_PCS_REF_INC_U;
1322 return true;
1323 default:
1324 return false;
1325 }
1326}
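/* Illustrative sketch: the 40-bit values handled here keep their lowest
 * 8 bits in the low register and the remaining 32 bits in the high register,
 * which ice_write_40b_phy_reg_eth56g() below implements with P_REG_40B_LOW_M
 * and P_REG_40B_HIGH_S (assumed to be an 8-bit mask and shift):
 *
 *	u32 lo = (u32)(val & 0xFF);	// low register, 8 bits
 *	u32 hi = (u32)(val >> 8);	// high register, upper 32 bits
 */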
1327
1328/**
1329 * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
1330 * @hw: pointer to the HW struct
1331 * @port: PHY port to read from
1332 * @low_addr: offset of the lower register to read from
1333 * @val: on return, the contents of the 64bit value from the PHY registers
1334 * @res_type: resource type
1335 *
1336 * Check if the caller has specified a known 64 bit register offset and read
1337 * the two registers associated with a 64bit value and return it in the val
1338 * pointer.
1339 *
1340 * Return:
1341 * * %0 - success
1342 * * %EINVAL - not a 64 bit register
1343 * * %other - failed to read from PHY
1344 */
1345static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
1346 u64 *val, enum eth56g_res_type res_type)
1347{
1348 u16 high_addr;
1349 u32 lo, hi;
1350 int err;
1351
1352 if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
1353 return -EINVAL;
1354
1355 err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type);
1356 if (err) {
1357 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x, err %d\n",
1358 low_addr, err);
1359 return err;
1360 }
1361
1362 err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type);
1363 if (err) {
1364 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x, err %d\n",
1365 high_addr, err);
1366 return err;
1367 }
1368
1369 *val = ((u64)hi << 32) | lo;
1370
1371 return 0;
1372}
1373
1374/**
1375 * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers
1376 * @hw: pointer to the HW struct
1377 * @port: PHY port to read from
1378 * @low_addr: offset of the lower register to read from
1379 * @val: on return, the contents of the 64bit value from the PHY registers
1380 *
1381 * Check if the caller has specified a known 64 bit register offset and read
1382 * the two registers associated with a 64bit value and return it in the val
1383 * pointer.
1384 *
1385 * Return:
1386 * * %0 - success
1387 * * %EINVAL - not a 64 bit register
1388 * * %other - failed to read from PHY
1389 */
1390static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
1391 u64 *val)
1392{
1393 return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val,
1394 ETH56G_PHY_REG_PTP);
1395}
1396
1397/**
1398 * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
1399 * @hw: pointer to the HW struct
1400 * @port: port to write to
1401 * @low_addr: offset of the low register
1402 * @val: 40b value to write
1403 * @res_type: resource type
1404 *
1405 * Check if the caller has specified a known 40 bit register offset and write
1406 * provided 40b value to the two associated registers by splitting it up into
1407 * two chunks, the lower 8 bits and the upper 32 bits.
1408 *
1409 * Return:
1410 * * %0 - success
1411 * * %EINVAL - not a 40 bit register
1412 * * %other - failed to write to PHY
1413 */
1414static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
1415 u16 low_addr, u64 val,
1416 enum eth56g_res_type res_type)
1417{
1418 u16 high_addr;
1419 u32 lo, hi;
1420 int err;
1421
1422 if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr))
1423 return -EINVAL;
1424
1425 lo = FIELD_GET(P_REG_40B_LOW_M, val);
1426 hi = (u32)(val >> P_REG_40B_HIGH_S);
1427
1428 err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
1429 if (err) {
1430 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
1431 low_addr, err);
1432 return err;
1433 }
1434
1435 err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
1436 if (err) {
1437 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
1438 high_addr, err);
1439 return err;
1440 }
1441
1442 return 0;
1443}
1444
1445/**
1446 * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY
1447 * @hw: pointer to the HW struct
1448 * @port: port to write to
1449 * @low_addr: offset of the low register
1450 * @val: 40b value to write
1451 *
1452 * Check if the caller has specified a known 40 bit register offset and write
1453 * provided 40b value to the two associated registers by splitting it up into
1454 * two chunks, the lower 8 bits and the upper 32 bits.
1455 *
1456 * Return:
1457 * * %0 - success
1458 * * %EINVAL - not a 40 bit register
1459 * * %other - failed to write to PHY
1460 */
1461static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
1462 u16 low_addr, u64 val)
1463{
1464 return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val,
1465 ETH56G_PHY_REG_PTP);
1466}
1467
1468/**
1469 * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
1470 * @hw: pointer to the HW struct
1471 * @port: PHY port to read from
1472 * @low_addr: offset of the lower register to read from
1473 * @val: the contents of the 64bit value to write to PHY
1474 * @res_type: resource type
1475 *
1476 * Check if the caller has specified a known 64 bit register offset and write
1477 * the 64bit value to the two associated 32bit PHY registers.
1478 *
1479 * Return:
1480 * * %0 - success
1481 * * %EINVAL - not a 64 bit register
1482 * * %other - failed to write to PHY
1483 */
1484static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
1485 u16 low_addr, u64 val,
1486 enum eth56g_res_type res_type)
1487{
1488 u16 high_addr;
1489 u32 lo, hi;
1490 int err;
1491
1492 if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
1493 return -EINVAL;
1494
1495 lo = lower_32_bits(val);
1496 hi = upper_32_bits(val);
1497
1498 err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
1499 if (err) {
1500 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
1501 low_addr, err);
1502 return err;
1503 }
1504
1505 err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
1506 if (err) {
1507 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
1508 high_addr, err);
1509 return err;
1510 }
1511
1512 return 0;
1513}
1514
1515/**
1516 * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers
1517 * @hw: pointer to the HW struct
1518 * @port: PHY port to read from
1519 * @low_addr: offset of the lower register to read from
1520 * @val: the contents of the 64bit value to write to PHY
1521 *
1522 * Check if the caller has specified a known 64 bit register offset and write
1523 * the 64bit value to the two associated 32bit PHY registers.
1524 *
1525 * Return:
1526 * * %0 - success
1527 * * %EINVAL - not a 64 bit register
1528 * * %other - failed to write to PHY
1529 */
1530static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
1531 u16 low_addr, u64 val)
1532{
1533 return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val,
1534 ETH56G_PHY_REG_PTP);
1535}
1536
1537/**
1538 * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory
1539 * @hw: pointer to the HW struct
1540 * @port: the port to read from
1541 * @idx: the timestamp index to read
1542 * @tstamp: on return, the 40bit timestamp value
1543 *
1544 * Read a 40bit timestamp value out of the two associated entries in the
1545 * port memory block of the internal PHYs of the 56G devices.
1546 *
1547 * Return:
1548 * * %0 - success
1549 * * %other - failed to read from PHY
1550 */
1551static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
1552 u64 *tstamp)
1553{
1554 u16 lo_addr, hi_addr;
1555 u32 lo, hi;
1556 int err;
1557
1558 lo_addr = (u16)PHY_TSTAMP_L(idx);
1559 hi_addr = (u16)PHY_TSTAMP_U(idx);
1560
1561 err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo);
1562 if (err) {
1563 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
1564 err);
1565 return err;
1566 }
1567
1568 err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi);
1569 if (err) {
1570 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
1571 err);
1572 return err;
1573 }
1574
1575 /* For 56G based internal PHYs, the timestamp is reported with the
1576 * lower 8 bits in the low register, and the upper 32 bits in the high
1577 * register.
1578 */
1579 *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
1580 FIELD_PREP(TS_PHY_LOW_M, lo);
1581
1582 return 0;
1583}
1584
1585/**
1586 * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the port block
1587 * @hw: pointer to the HW struct
1588 * @port: the port to read from
1589 * @idx: the timestamp index to reset
1590 *
1591 * Read and then forcibly clear the timestamp index to ensure the valid bit is
1592 * cleared and the timestamp status bit is reset in the PHY port memory of
1593 * internal PHYs of the 56G devices.
1594 *
1595 * To directly clear the contents of the timestamp block entirely, discarding
1596 * all timestamp data at once, software should instead use
1597 * ice_ptp_reset_ts_memory_eth56g().
1598 *
1599 * This function should only be called on an idx whose bit is set according to
1600 * ice_get_phy_tx_tstamp_ready().
1601 *
1602 * Return:
1603 * * %0 - success
1604 * * %other - failed to write to PHY
1605 */
1606static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
1607{
1608 u64 unused_tstamp;
1609 u16 lo_addr;
1610 int err;
1611
1612 /* Read the timestamp register to ensure the timestamp status bit is
1613 * cleared.
1614 */
1615 err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp);
1616 if (err) {
1617 ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n",
1618 port, idx, err);
1619 }
1620
1621 lo_addr = (u16)PHY_TSTAMP_L(idx);
1622
1623 err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0);
1624 if (err) {
1625 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n",
1626 port, idx, err);
1627 return err;
1628 }
1629
1630 return 0;
1631}
1632
1633/**
1634 * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block
1635 * @hw: pointer to the HW struct
1636 */
1637static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw)
1638{
1639 unsigned int port;
1640
1641 for (port = 0; port < hw->ptp.num_lports; port++) {
1642 ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
1643 0);
1644 ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U,
1645 0);
1646 }
1647}
1648
1649/**
1650 * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time
1651 * @hw: pointer to the HW struct
1652 * @port: port number
1653 * @time: time to initialize the PHY port clocks to
1654 *
1655 * Write a new initial time value into registers of a specific PHY port.
1656 *
1657 * Return:
1658 * * %0 - success
1659 * * %other - failed to write to PHY
1660 */
1661static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port,
1662 u64 time)
1663{
1664 int err;
1665
1666 /* Tx case */
1667 err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
1668 time);
1669 if (err)
1670 return err;
1671
1672 /* Rx case */
1673 return ice_write_64b_ptp_reg_eth56g(hw, port,
1674 PHY_REG_RX_TIMER_INC_PRE_L, time);
1675}
1676
1677/**
1678 * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
1679 * @hw: pointer to the HW struct
1680 * @time: Time to initialize the PHY port clocks to
1681 *
1682 * Program the PHY port registers with a new initial time value. The port
1683 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
1684 * command. The time value is the upper 32 bits of the PHY timer, usually in
1685 * units of nominal nanoseconds.
1686 *
1687 * Return:
1688 * * %0 - success
1689 * * %other - failed to write to PHY
1690 */
1691static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
1692{
1693 u64 phy_time;
1694 u8 port;
1695
1696 /* The time represents the upper 32 bits of the PHY timer, so we need
1697 * to shift to account for this when programming.
1698 */
1699 phy_time = (u64)time << 32;
1700
1701 for (port = 0; port < hw->ptp.num_lports; port++) {
1702 int err;
1703
1704 err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time);
1705 if (err) {
1706 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1707 port, err);
1708 return err;
1709 }
1710 }
1711
1712 return 0;
1713}
1714
1715/**
1716 * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
1717 * @hw: pointer to HW struct
1718 * @port: Port number to be programmed
1719 * @time: time in cycles to adjust the port clocks
1720 *
1721 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1722 * registers. The atomic adjustment won't be completed until the driver issues
1723 * an ICE_PTP_ADJ_TIME command.
1724 *
1725 * Note that time is not in units of nanoseconds. It is in clock time
1726 * including the lower sub-nanosecond portion of the port timer.
1727 *
1728 * Negative adjustments are supported using 2s complement arithmetic.
1729 *
1730 * Return:
1731 * * %0 - success
1732 * * %other - failed to write to PHY
1733 */
1734static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time)
1735{
1736 u32 l_time, u_time;
1737 int err;
1738
1739 l_time = lower_32_bits(time);
1740 u_time = upper_32_bits(time);
1741
1742 /* Tx case */
1743 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
1744 l_time);
1745 if (err)
1746 goto exit_err;
1747
1748 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U,
1749 u_time);
1750 if (err)
1751 goto exit_err;
1752
1753 /* Rx case */
1754 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L,
1755 l_time);
1756 if (err)
1757 goto exit_err;
1758
1759 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U,
1760 u_time);
1761 if (err)
1762 goto exit_err;
1763
1764 return 0;
1765
1766exit_err:
1767 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1768 port, err);
1769 return err;
1770}
1771
1772/**
1773 * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
1774 * @hw: pointer to HW struct
1775 * @adj: adjustment in nanoseconds
1776 *
1777 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1778 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1779 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
1780 *
1781 * Return:
1782 * * %0 - success
1783 * * %other - failed to write to PHY
1784 */
1785static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj)
1786{
1787 s64 cycles;
1788 u8 port;
1789
1790 /* The port clock supports adjustment of the sub-nanosecond portion of
1791 * the clock (lowest 32 bits). We shift the provided adjustment in
1792 * nanoseconds by 32 to calculate the appropriate adjustment to program
1793 * into the PHY ports.
1794 */
1795 cycles = (s64)adj << 32;
1796
1797 for (port = 0; port < hw->ptp.num_lports; port++) {
1798 int err;
1799
1800 err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles);
1801 if (err)
1802 return err;
1803 }
1804
1805 return 0;
1806}
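/* Worked example (illustrative): an adjustment of adj = +1 ns becomes
 * cycles = 1 << 32 = 0x100000000, which ice_ptp_prep_port_adj_eth56g() splits
 * into TIMER_INC_PRE_U = 0x1 and TIMER_INC_PRE_L = 0x0. Negative adjustments
 * rely on two's complement sign extension of the s64, e.g. adj = -1 ns gives
 * 0xFFFFFFFF in the upper word and 0x0 in the lower.
 */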
1807
1808/**
1809 * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for a new increment value
1810 * @hw: pointer to HW struct
1811 * @incval: new increment value to prepare
1812 *
1813 * Prepare each of the PHY ports for a new increment value by programming the
1814 * port's TIMETUS registers. The new increment value will be updated after
1815 * issuing an ICE_PTP_INIT_INCVAL command.
1816 *
1817 * Return:
1818 * * %0 - success
1819 * * %other - failed to write to PHY
1820 */
1821static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
1822{
1823 u8 port;
1824
1825 for (port = 0; port < hw->ptp.num_lports; port++) {
1826 int err;
1827
1828 err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
1829 incval);
1830 if (err) {
1831 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1832 port, err);
1833 return err;
1834 }
1835 }
1836
1837 return 0;
1838}
1839
1840/**
1841 * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
1842 * @hw: pointer to HW struct
1843 * @port: Port number to read
1844 * @tx_ts: on return, the Tx port time capture
1845 * @rx_ts: on return, the Rx port time capture
1846 *
1847 * Read the port's Tx and Rx local time capture values.
1848 *
1849 * Return:
1850 * * %0 - success
1851 * * %other - failed to read from PHY
1852 */
1853static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
1854 u64 *tx_ts, u64 *rx_ts)
1855{
1856 int err;
1857
1858 /* Tx case */
1859 err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
1860 tx_ts);
1861 if (err) {
1862 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1863 err);
1864 return err;
1865 }
1866
1867 ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts);
1868
1869 /* Rx case */
1870 err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
1871 rx_ts);
1872 if (err) {
1873 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1874 err);
1875 return err;
1876 }
1877
1878 ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts);
1879
1880 return 0;
1881}
1882
1883/**
1884 * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command
1885 * @hw: pointer to HW struct
1886 * @port: Port to which cmd has to be sent
1887 * @cmd: Command to be sent to the port
1888 *
1889 * Prepare the requested port for an upcoming timer sync command.
1890 *
1891 * Return:
1892 * * %0 - success
1893 * * %other - failed to write to PHY
1894 */
1895static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port,
1896 enum ice_ptp_tmr_cmd cmd)
1897{
1898 u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
1899 int err;
1900
1901 /* Tx case */
1902 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val);
1903 if (err) {
1904 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1905 err);
1906 return err;
1907 }
1908
1909 /* Rx case */
1910 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val);
1911 if (err) {
1912 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1913 err);
1914 return err;
1915 }
1916
1917 return 0;
1918}
1919
1920/**
1921 * ice_phy_get_speed_eth56g - Get link speed based on PHY link type
1922 * @li: pointer to link information struct
1923 *
1924 * Return: simplified ETH56G PHY speed
1925 */
1926static enum ice_eth56g_link_spd
1927ice_phy_get_speed_eth56g(struct ice_link_status *li)
1928{
1929 u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low,
1930 li->phy_type_high);
1931
1932 switch (speed) {
1933 case ICE_AQ_LINK_SPEED_1000MB:
1934 return ICE_ETH56G_LNK_SPD_1G;
1935 case ICE_AQ_LINK_SPEED_2500MB:
1936 return ICE_ETH56G_LNK_SPD_2_5G;
1937 case ICE_AQ_LINK_SPEED_10GB:
1938 return ICE_ETH56G_LNK_SPD_10G;
1939 case ICE_AQ_LINK_SPEED_25GB:
1940 return ICE_ETH56G_LNK_SPD_25G;
1941 case ICE_AQ_LINK_SPEED_40GB:
1942 return ICE_ETH56G_LNK_SPD_40G;
1943 case ICE_AQ_LINK_SPEED_50GB:
1944 switch (li->phy_type_low) {
1945 case ICE_PHY_TYPE_LOW_50GBASE_SR:
1946 case ICE_PHY_TYPE_LOW_50GBASE_FR:
1947 case ICE_PHY_TYPE_LOW_50GBASE_LR:
1948 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
1949 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
1950 case ICE_PHY_TYPE_LOW_50G_AUI1:
1951 return ICE_ETH56G_LNK_SPD_50G;
1952 default:
1953 return ICE_ETH56G_LNK_SPD_50G2;
1954 }
1955 case ICE_AQ_LINK_SPEED_100GB:
1956 if (li->phy_type_high ||
1957 li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2)
1958 return ICE_ETH56G_LNK_SPD_100G2;
1959 else
1960 return ICE_ETH56G_LNK_SPD_100G;
1961 default:
1962 return ICE_ETH56G_LNK_SPD_1G;
1963 }
1964}
1965
1966/**
1967 * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle
1968 * @hw: pointer to the HW struct
1969 * @port: port to configure
1970 *
1971 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1972 * timestamp calibration process.
1973 *
1974 * Return:
1975 * * %0 - success
1976 * * %other - PHY read/write failed
1977 */
1978static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
1979{
1980 u32 val;
1981 int err;
1982
1983 err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH,
1984 ICE_ETH56G_NOMINAL_THRESH4);
1985 if (err) {
1986		ice_debug(hw, ICE_DBG_PTP, "Failed to write VENDOR_TXLANE_THRESH, status: %d",
1987 err);
1988 return err;
1989 }
1990
1991 switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
1992 case ICE_ETH56G_LNK_SPD_1G:
1993 case ICE_ETH56G_LNK_SPD_2_5G:
1994 err = ice_read_quad_ptp_reg_eth56g(hw, port,
1995 PHY_GPCS_CONFIG_REG0, &val);
1996 if (err) {
1997 ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
1998 err);
1999 return err;
2000 }
2001
2002 val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M;
2003 val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
2004 ICE_ETH56G_NOMINAL_TX_THRESH);
2005
2006 err = ice_write_quad_ptp_reg_eth56g(hw, port,
2007 PHY_GPCS_CONFIG_REG0, val);
2008 if (err) {
2009 ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
2010 err);
2011 return err;
2012 }
2013 break;
2014 default:
2015 break;
2016 }
2017
2018 err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L,
2019 ICE_ETH56G_NOMINAL_PCS_REF_TUS);
2020 if (err) {
2021 ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d",
2022 err);
2023 return err;
2024 }
2025
2026 err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L,
2027 ICE_ETH56G_NOMINAL_PCS_REF_INC);
2028 if (err) {
2029 ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d",
2030 err);
2031 return err;
2032 }
2033
2034 return 0;
2035}
2036
2037/**
2038 * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings
2039 * @hw: Pointer to the HW struct
2040 * @port: Port to configure
2041 *
2042 * Return:
2043 * * %0 - success
2044 * * %other - PHY read/write failed
2045 */
2046int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
2047{
2048 u8 quad_lane = port % ICE_PORTS_PER_QUAD;
2049 u32 addr, val, peer_delay;
2050 bool enable, sfd_ena;
2051 int err;
2052
2053 enable = hw->ptp.phy.eth56g.onestep_ena;
2054 peer_delay = hw->ptp.phy.eth56g.peer_delay;
2055 sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
2056
2057 addr = PHY_PTP_1STEP_CONFIG;
2058 err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
2059 if (err)
2060 return err;
2061
2062 if (enable)
2063 val |= BIT(quad_lane);
2064 else
2065 val &= ~BIT(quad_lane);
2066
2067 val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
2068
2069 err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
2070 if (err)
2071 return err;
2072
2073 addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
2074 val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
2075 if (peer_delay)
2076 val |= PHY_PTP_1STEP_PD_ADD_PD_M;
2077 val |= PHY_PTP_1STEP_PD_DLY_V_M;
2078 err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
2079 if (err)
2080 return err;
2081
2082 val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
2083 err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
2084 if (err)
2085 return err;
2086
2087 addr = PHY_MAC_XIF_MODE;
2088 err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
2089 if (err)
2090 return err;
2091
2092 val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M |
2093 PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M);
2094
2095 switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
2096 case ICE_ETH56G_LNK_SPD_1G:
2097 case ICE_ETH56G_LNK_SPD_2_5G:
2098 val |= PHY_MAC_XIF_GMII_TS_SEL_M;
2099 break;
2100 default:
2101 break;
2102 }
2103
2104 val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) |
2105 FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
2106 FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
2107
2108 return ice_write_mac_reg_eth56g(hw, port, addr, val);
2109}
2110
2111/**
2112 * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values
2113 * @a: multiplier value
2114 * @b: multiplicand value
2115 *
2116 * Return: result of multiplication
2117 */
2118static u32 mul_u32_u32_fx_q9(u32 a, u32 b)
2119{
2120 return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W);
2121}
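
/* Worked example, assuming ICE_ETH56G_MAC_CFG_FRAC_W is 9 as the Q9 name
 * suggests: each operand carries 9 fractional bits, so the raw 64-bit product
 * carries 18 and must be shifted right by 9 to return to Q9.
 *
 *   a = 2.5  in Q9 -> 2.5  * 512 = 1280 (0x500)
 *   b = 0.75 in Q9 -> 0.75 * 512 =  384 (0x180)
 *   mul_u32_u32_fx_q9(1280, 384) = (1280 * 384) >> 9 = 960
 *
 * 960 / 512 = 1.875, i.e. the expected product of 2.5 and 0.75, still in Q9.
 */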
2122
2123/**
2124 * add_u32_u32_fx - Add two u32 fixed point values and discard overflow
2125 * @a: first value
2126 * @b: second value
2127 *
2128 * Return: result of addition
2129 */
2130static u32 add_u32_u32_fx(u32 a, u32 b)
2131{
2132 return lower_32_bits(((u64)a + b));
2133}
2134
2135/**
2136 * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value
2137 * @hw: pointer to the HW struct
2138 * @port: port to configure
2139 * @bs: bitslip multiplier
2140 * @fc: FC-FEC enabled
2141 * @rs: RS-FEC enabled
2142 * @spd: link speed
2143 *
2144 * Return: calculated bitslip value
2145 */
2146static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
2147 bool fc, bool rs,
2148 enum ice_eth56g_link_spd spd)
2149{
2150 u32 bitslip;
2151 int err;
2152
2153 if (!bs || rs)
2154 return 0;
2155
2156 if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
2157 err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
2158 &bitslip);
2159 } else {
2160 u8 quad_lane = port % ICE_PORTS_PER_QUAD;
2161 u32 addr;
2162
2163 addr = PHY_REG_SD_BIT_SLIP(quad_lane);
2164 err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
2165 }
2166 if (err)
2167 return 0;
2168
2169 if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) {
2170 /* Bitslip register value of 0 corresponds to 10 so substitute
2171 * it for calculations
2172 */
2173 bitslip = 10;
2174 } else if (spd == ICE_ETH56G_LNK_SPD_10G ||
2175 spd == ICE_ETH56G_LNK_SPD_25G) {
2176 if (fc)
2177 bitslip = bitslip * 2 + 32;
2178 else
2179 bitslip = (u32)((s32)bitslip * -1 + 20);
2180 }
2181
2182 bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W;
2183 return mul_u32_u32_fx_q9(bitslip, bs);
2184}
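
/* Worked example of the FC-FEC branch above, purely illustrative and assuming
 * the bs multiplier is supplied in the same Q9 fixed-point format used by
 * mul_u32_u32_fx_q9():
 *
 *   raw PHY_REG_SD_BIT_SLIP value = 4, 25G link, FC-FEC enabled
 *   bitslip = 4 * 2 + 32 = 40
 *   bitslip <<= 9                       -> 20480 (40.0 in Q9)
 *   return mul_u32_u32_fx_q9(20480, bs)
 *
 * With bs = 0x200 (1.0 in Q9) the function returns 20480, i.e. 40.0 in the
 * fixed-point domain that ice_phy_set_offsets_eth56g() accumulates into
 * rx_offset.
 */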
2185
2186/**
2187 * ice_ptp_calc_deskew_eth56g - Calculate deskew value
2188 * @hw: pointer to the HW struct
2189 * @port: port to configure
2190 * @ds: deskew multiplier
2191 * @rs: RS-FEC enabled
2192 * @spd: link speed
2193 *
2194 * Return: calculated deskew value
2195 */
2196static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds,
2197 bool rs, enum ice_eth56g_link_spd spd)
2198{
2199 u32 deskew_i, deskew_f;
2200 int err;
2201
2202 if (!ds)
2203 return 0;
2204
2205 read_poll_timeout(ice_read_ptp_reg_eth56g, err,
2206 FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500,
2207 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0,
2208 &deskew_i);
2209 if (err)
2210 return err;
2211
2212 deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i);
2213 deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i);
2214
2215 if (rs && spd == ICE_ETH56G_LNK_SPD_50G2)
2216 ds = 0x633; /* 3.1 */
2217 else if (rs && spd == ICE_ETH56G_LNK_SPD_100G)
2218 ds = 0x31b; /* 1.552 */
2219
2220 deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i);
2221 /* Shift 3 fractional bits to the end of the integer part */
2222 deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W;
2223 return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds);
2224}
2225
2226/**
2227 * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values
2228 * @hw: pointer to the HW struct
2229 * @port: port to configure
2230 * @spd: link speed
2231 * @cfg: structure to store output values
2232 * @fc: FC-FEC enabled
2233 * @rs: RS-FEC enabled
2234 *
2235 * Return:
2236 * * %0 - success
2237 * * %other - failed to write to PHY
2238 */
2239static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port,
2240 enum ice_eth56g_link_spd spd,
2241 const struct ice_eth56g_mac_reg_cfg *cfg,
2242 bool fc, bool rs)
2243{
2244 u32 rx_offset, tx_offset, bs_ds;
2245 bool onestep, sfd;
2246
2247 onestep = hw->ptp.phy.eth56g.onestep_ena;
2248 sfd = hw->ptp.phy.eth56g.sfd_ena;
2249 bs_ds = cfg->rx_offset.bs_ds;
2250
2251 if (fc)
2252 rx_offset = cfg->rx_offset.fc;
2253 else if (rs)
2254 rx_offset = cfg->rx_offset.rs;
2255 else
2256 rx_offset = cfg->rx_offset.no_fec;
2257
2258 rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes);
2259 if (sfd)
2260 rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd);
2261
2262 if (spd < ICE_ETH56G_LNK_SPD_40G)
2263 bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs,
2264 spd);
2265 else
2266 bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd);
2267 rx_offset = add_u32_u32_fx(rx_offset, bs_ds);
2268 rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT |
2269 ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC;
2270
2271 if (fc)
2272 tx_offset = cfg->tx_offset.fc;
2273 else if (rs)
2274 tx_offset = cfg->tx_offset.rs;
2275 else
2276 tx_offset = cfg->tx_offset.no_fec;
2277 tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd +
2278 cfg->tx_offset.onestep * onestep;
2279
2280 ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset);
2281 return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset);
2282}
2283
2284/**
2285 * ice_phy_cfg_mac_eth56g - Configure MAC for PTP
2286 * @hw: Pointer to the HW struct
2287 * @port: Port to configure
2288 *
2289 * Return:
2290 * * %0 - success
2291 * * %other - failed to write to PHY
2292 */
2293static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
2294{
2295 const struct ice_eth56g_mac_reg_cfg *cfg;
2296 enum ice_eth56g_link_spd spd;
2297 struct ice_link_status *li;
2298 bool fc = false;
2299 bool rs = false;
2300 bool onestep;
2301 u32 val;
2302 int err;
2303
2304 onestep = hw->ptp.phy.eth56g.onestep_ena;
2305 li = &hw->port_info->phy.link_info;
2306 spd = ice_phy_get_speed_eth56g(li);
2307 if (!!(li->an_info & ICE_AQ_FEC_EN)) {
2308 if (spd == ICE_ETH56G_LNK_SPD_10G) {
2309 fc = true;
2310 } else {
2311 fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN);
2312 rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN);
2313 }
2314 }
2315	cfg = &eth56g_mac_cfg[spd];
2316
2317 err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0);
2318 if (err)
2319 return err;
2320
2321 err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0);
2322 if (err)
2323 return err;
2324
2325 val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M,
2326 cfg->tx_mode.def + rs * cfg->tx_mode.rs) |
2327 FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) |
2328 FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M,
2329 cfg->tx_cw_dly.def +
2330 onestep * cfg->tx_cw_dly.onestep) |
2331 FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M,
2332 cfg->rx_mode.def + rs * cfg->rx_mode.rs) |
2333 FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M,
2334 cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) |
2335 FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M,
2336 cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) |
2337 FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk);
2338 err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val);
2339 if (err)
2340 return err;
2341
2342 err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME,
2343 cfg->blktime);
2344 if (err)
2345 return err;
2346
2347 err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs);
2348 if (err)
2349 return err;
2350
2351 if (spd == ICE_ETH56G_LNK_SPD_25G && !rs)
2352 val = 0;
2353 else
2354 val = cfg->mktime;
2355
2356 return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val);
2357}
2358
2359/**
2360 * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt
2361 * @hw: pointer to the HW struct
2362 * @port: the timestamp port
2363 * @ena: enable or disable interrupt
2364 * @threshold: interrupt threshold
2365 *
2366 * Configure TX timestamp interrupt for the specified port
2367 *
2368 * Return:
2369 * * %0 - success
2370 * * %other - PHY read/write failed
2371 */
2372int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
2373{
2374 int err;
2375 u32 val;
2376
2377 err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
2378 if (err)
2379 return err;
2380
2381 if (ena) {
2382 val |= PHY_TS_INT_CONFIG_ENA_M;
2383 val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
2384 val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
2385 } else {
2386 val &= ~PHY_TS_INT_CONFIG_ENA_M;
2387 }
2388
2389 return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
2390}
2391
2392/**
2393 * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
2394 * @hw: pointer to the HW struct
2395 * @port: the PHY port to read
2396 * @phy_time: on return, the 64bit PHY timer value
2397 * @phc_time: on return, the lower 64bits of PHC time
2398 *
2399 * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
2400 * and PHC timer values.
2401 *
2402 * Return:
2403 * * %0 - success
2404 * * %other - PHY read/write failed
2405 */
2406static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
2407 u64 *phy_time, u64 *phc_time)
2408{
2409 u64 tx_time, rx_time;
2410 u32 zo, lo;
2411 u8 tmr_idx;
2412 int err;
2413
2414 tmr_idx = ice_get_ptp_src_clock_index(hw);
2415
2416 /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */
2417 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2418
2419 /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */
2420 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
2421 if (err)
2422 return err;
2423
2424 /* Issue the sync to start the ICE_PTP_READ_TIME capture */
2425 ice_ptp_exec_tmr_cmd(hw);
2426
2427 /* Read the captured PHC time from the shadow time registers */
2428 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2429 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2430 *phc_time = (u64)lo << 32 | zo;
2431
2432 /* Read the captured PHY time from the PHY shadow registers */
2433 err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
2434 if (err)
2435 return err;
2436
2437 /* If the PHY Tx and Rx timers don't match, log a warning message.
2438 * Note that this should not happen in normal circumstances since the
2439 * driver always programs them together.
2440 */
2441 if (tx_time != rx_time)
2442 dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2443 port, tx_time, rx_time);
2444
2445 *phy_time = tx_time;
2446
2447 return 0;
2448}
2449
2450/**
2451 * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
2452 * @hw: pointer to the HW struct
2453 * @port: the PHY port to synchronize
2454 *
2455 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2456 * This is done by issuing a ICE_PTP_READ_TIME command which triggers a
2457 * simultaneous read of the PHY timer and PHC timer. The difference is then
2458 * used to calculate an appropriate 2s complement adjustment which, when
2459 * added to the PHY timer, makes it read the same value as the primary PHC
2460 * timer.
2461 *
2462 * Return:
2463 * * %0 - success
2464 * %-EBUSY - failed to acquire PTP semaphore
2465 * * %other - PHY read/write failed
2466 */
2467static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
2468{
2469 u64 phc_time, phy_time, difference;
2470 int err;
2471
2472 if (!ice_ptp_lock(hw)) {
2473 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2474 return -EBUSY;
2475 }
2476
2477 err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
2478 if (err)
2479 goto err_unlock;
2480
2481 /* Calculate the amount required to add to the port time in order for
2482 * it to match the PHC time.
2483 *
2484 * Note that the port adjustment is done using 2s complement
2485 * arithmetic. This is convenient since it means that we can simply
2486 * calculate the difference between the PHC time and the port time,
2487 * and it will be interpreted correctly.
2488 */
2489
2490 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2491 difference = phc_time - phy_time;
2492
2493 err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference);
2494 if (err)
2495 goto err_unlock;
2496
2497 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
2498 if (err)
2499 goto err_unlock;
2500
2501 /* Issue the sync to activate the time adjustment */
2502 ice_ptp_exec_tmr_cmd(hw);
2503
2504 /* Re-capture the timer values to flush the command registers and
2505 * verify that the time was properly adjusted.
2506 */
2507 err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
2508 if (err)
2509 goto err_unlock;
2510
2511 dev_info(ice_hw_to_dev(hw),
2512 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2513 port, phy_time, phc_time);
2514
2515err_unlock:
2516 ice_ptp_unlock(hw);
2517 return err;
2518}
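
/* Example of the 2s complement behaviour relied on above, for illustration:
 * the subtraction uses unsigned 64-bit arithmetic, so it wraps when the PHY
 * timer is ahead of the PHC, yet the wrapped value is exactly the negative
 * adjustment the port needs.
 *
 *   phc_time   = 0x0000001000000000
 *   phy_time   = 0x0000001080000000
 *   difference = phc_time - phy_time = 0xFFFFFFFF80000000
 *
 * Interpreted as s64 this is -0x80000000, i.e. -0.5 ns of clock time, which
 * ice_ptp_prep_port_adj_eth56g() programs unchanged.
 */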
2519
2520/**
2521 * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
2522 * @hw: pointer to the HW struct
2523 * @port: the PHY port to stop
2524 * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
2525 *
2526 * Stop the clock of a PHY port. This must be done as part of the flow to
2527 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2528 * initialized or when link speed changes.
2529 *
2530 * Return:
2531 * * %0 - success
2532 * * %other - failed to write to PHY
2533 */
2534int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
2535{
2536 int err;
2537
2538 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
2539 if (err)
2540 return err;
2541
2542 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
2543 if (err)
2544 return err;
2545
2546 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2547
2548 return 0;
2549}
2550
2551/**
2552 * ice_start_phy_timer_eth56g - Start the PHY clock timer
2553 * @hw: pointer to the HW struct
2554 * @port: the PHY port to start
2555 *
2556 * Start the clock of a PHY port. This must be done as part of the flow to
2557 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2558 * initialized or when link speed changes.
2559 *
2560 * Return:
2561 * * %0 - success
2562 * * %other - PHY read/write failed
2563 */
2564int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
2565{
2566 u32 lo, hi;
2567 u64 incval;
2568 u8 tmr_idx;
2569 int err;
2570
2571 tmr_idx = ice_get_ptp_src_clock_index(hw);
2572
2573 err = ice_stop_phy_timer_eth56g(hw, port, false);
2574 if (err)
2575 return err;
2576
2577 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2578
2579 err = ice_phy_cfg_parpcs_eth56g(hw, port);
2580 if (err)
2581 return err;
2582
2583 err = ice_phy_cfg_ptp_1step_eth56g(hw, port);
2584 if (err)
2585 return err;
2586
2587 err = ice_phy_cfg_mac_eth56g(hw, port);
2588 if (err)
2589 return err;
2590
2591 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2592 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2593 incval = (u64)hi << 32 | lo;
2594
2595 err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
2596 if (err)
2597 return err;
2598
2599 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2600 if (err)
2601 return err;
2602
2603 ice_ptp_exec_tmr_cmd(hw);
2604
2605 err = ice_sync_phy_timer_eth56g(hw, port);
2606 if (err)
2607 return err;
2608
2609 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
2610 if (err)
2611 return err;
2612
2613 err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
2614 if (err)
2615 return err;
2616
2617 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2618
2619 return 0;
2620}
2621
2622/**
2623 * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
2624 * @hw: pointer to HW struct
2625 * @enable: Enable or disable access
2626 *
2627 * Enable sideband devices (PHY and others) access.
2628 */
2629static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
2630{
2631 u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
2632
2633 if (enable)
2634 val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
2635 else
2636 val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
2637
2638 wr32(hw, PF_SB_REM_DEV_CTL, val);
2639}
2640
2641/**
2642 * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
2643 * @hw: pointer to HW struct
2644 *
2645 * Perform PHC initialization steps specific to E82X devices.
2646 *
2647 * Return:
2648 * * %0 - success
2649 * * %other - failed to initialize CGU
2650 */
2651static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
2652{
2653 ice_sb_access_ena_eth56g(hw, true);
2654 /* Initialize the Clock Generation Unit */
2655 return ice_init_cgu_e82x(hw);
2656}
2657
2658/**
2659 * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
2660 * @hw: pointer to the HW struct
2661 * @ts_status: the timestamp mask pointer
2662 *
2663 * Read the PHY Tx timestamp status mask indicating which ports have Tx
2664 * timestamps available.
2665 *
2666 * Return:
2667 * * %0 - success
2668 * * %other - failed to read from PHY
2669 */
2670int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
2671{
2672 const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g;
2673 u8 phy, mask;
2674 u32 status;
2675
2676 mask = (1 << hw->ptp.ports_per_phy) - 1;
2677 *ts_status = 0;
2678
2679 for (phy = 0; phy < params->num_phys; phy++) {
2680 int err;
2681
2682 err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
2683 if (err)
2684 return err;
2685
2686 *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
2687 }
2688
2689	ice_debug(hw, ICE_DBG_PTP, "PHY interrupt status: %x\n", *ts_status);
2690
2691 return 0;
2692}
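
/* Shape of the combined status word, for illustration: with the E825 layout
 * set up in ice_ptp_init_phy_e825() (2 PHYs, 4 ports per PHY) the per-PHY
 * mask is 0xF, so PHY 0 contributes bits 0-3 and PHY 1 contributes bits 4-7.
 *
 *   PHY 0 PHY_PTP_INT_STATUS = 0x5 -> bits 0 and 2 (ports 0 and 2)
 *   PHY 1 PHY_PTP_INT_STATUS = 0x2 -> bit 1, shifted up to bit 5 (port 5)
 *   *ts_status = 0x5 | (0x2 << 4) = 0x25
 */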
2693
2694/**
2695 * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
2696 * @hw: pointer to the HW struct
2697 * @port: the PHY port to read from
2698 * @tstamp_ready: contents of the Tx memory status register
2699 *
2700 * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in
2701 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2702 * ready to be captured from the PHY timestamp block.
2703 *
2704 * Return:
2705 * * %0 - success
2706 * * %other - failed to read from PHY
2707 */
2708static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
2709 u64 *tstamp_ready)
2710{
2711 int err;
2712
2713 err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
2714 tstamp_ready);
2715 if (err) {
2716 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n",
2717 port, err);
2718 return err;
2719 }
2720
2721 return 0;
2722}
2723
2724/**
2725 * ice_ptp_init_phy_e825 - initialize PHY parameters
2726 * @hw: pointer to the HW struct
2727 */
2728static void ice_ptp_init_phy_e825(struct ice_hw *hw)
2729{
2730 struct ice_ptp_hw *ptp = &hw->ptp;
2731 struct ice_eth56g_params *params;
2732 u32 phy_rev;
2733 int err;
2734
2735 ptp->phy_model = ICE_PHY_ETH56G;
2736 params = &ptp->phy.eth56g;
2737 params->onestep_ena = false;
2738 params->peer_delay = 0;
2739 params->sfd_ena = false;
2740 params->num_phys = 2;
2741 ptp->ports_per_phy = 4;
2742 ptp->num_lports = params->num_phys * ptp->ports_per_phy;
2743
2744 ice_sb_access_ena_eth56g(hw, true);
2745 err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
2746 if (err || phy_rev != PHY_REVISION_ETH56G)
2747 ptp->phy_model = ICE_PHY_UNSUP;
2748}
2749
2750/* E822 family functions
2751 *
2752 * The following functions operate on the E822 family of devices.
2753 */
2754
2755/**
2756 * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
2757 * @hw: pointer to the HW struct
2758 * @msg: the PHY message buffer to fill in
2759 * @port: the port to access
2760 * @offset: the register offset
2761 */
2762static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
2763 struct ice_sbq_msg_input *msg, u8 port,
2764 u16 offset)
2765{
2766 int phy_port, quadtype;
2767
2768 phy_port = port % hw->ptp.ports_per_phy;
2769 quadtype = ICE_GET_QUAD_NUM(port) %
2770 ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
2771
2772 if (quadtype == 0) {
2773 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
2774 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
2775 } else {
2776 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
2777 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
2778 }
2779
2780 msg->dest_dev = rmn_0;
2781}
2782
2783/**
2784 * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
2785 * @low_addr: the low address to check
2786 * @high_addr: on return, contains the high address of the 64bit register
2787 *
2788 * Checks if the provided low address is one of the known 64bit PHY values
2789 * represented as two 32bit registers. If it is, return the appropriate high
2790 * register offset to use.
2791 */
2792static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
2793{
2794 switch (low_addr) {
2795 case P_REG_PAR_PCS_TX_OFFSET_L:
2796 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
2797 return true;
2798 case P_REG_PAR_PCS_RX_OFFSET_L:
2799 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
2800 return true;
2801 case P_REG_PAR_TX_TIME_L:
2802 *high_addr = P_REG_PAR_TX_TIME_U;
2803 return true;
2804 case P_REG_PAR_RX_TIME_L:
2805 *high_addr = P_REG_PAR_RX_TIME_U;
2806 return true;
2807 case P_REG_TOTAL_TX_OFFSET_L:
2808 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
2809 return true;
2810 case P_REG_TOTAL_RX_OFFSET_L:
2811 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
2812 return true;
2813 case P_REG_UIX66_10G_40G_L:
2814 *high_addr = P_REG_UIX66_10G_40G_U;
2815 return true;
2816 case P_REG_UIX66_25G_100G_L:
2817 *high_addr = P_REG_UIX66_25G_100G_U;
2818 return true;
2819 case P_REG_TX_CAPTURE_L:
2820 *high_addr = P_REG_TX_CAPTURE_U;
2821 return true;
2822 case P_REG_RX_CAPTURE_L:
2823 *high_addr = P_REG_RX_CAPTURE_U;
2824 return true;
2825 case P_REG_TX_TIMER_INC_PRE_L:
2826 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
2827 return true;
2828 case P_REG_RX_TIMER_INC_PRE_L:
2829 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
2830 return true;
2831 default:
2832 return false;
2833 }
2834}
2835
2836/**
2837 * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
2838 * @low_addr: the low address to check
2839 * @high_addr: on return, contains the high address of the 40bit value
2840 *
2841 * Checks if the provided low address is one of the known 40bit PHY values
2842 * split into two registers with the lower 8 bits in the low register and the
2843 * upper 32 bits in the high register. If it is, return the appropriate high
2844 * register offset to use.
2845 */
2846static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
2847{
2848 switch (low_addr) {
2849 case P_REG_TIMETUS_L:
2850 *high_addr = P_REG_TIMETUS_U;
2851 return true;
2852 case P_REG_PAR_RX_TUS_L:
2853 *high_addr = P_REG_PAR_RX_TUS_U;
2854 return true;
2855 case P_REG_PAR_TX_TUS_L:
2856 *high_addr = P_REG_PAR_TX_TUS_U;
2857 return true;
2858 case P_REG_PCS_RX_TUS_L:
2859 *high_addr = P_REG_PCS_RX_TUS_U;
2860 return true;
2861 case P_REG_PCS_TX_TUS_L:
2862 *high_addr = P_REG_PCS_TX_TUS_U;
2863 return true;
2864 case P_REG_DESK_PAR_RX_TUS_L:
2865 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
2866 return true;
2867 case P_REG_DESK_PAR_TX_TUS_L:
2868 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
2869 return true;
2870 case P_REG_DESK_PCS_RX_TUS_L:
2871 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
2872 return true;
2873 case P_REG_DESK_PCS_TX_TUS_L:
2874 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
2875 return true;
2876 default:
2877 return false;
2878 }
2879}
2880
2881/**
2882 * ice_read_phy_reg_e82x - Read a PHY register
2883 * @hw: pointer to the HW struct
2884 * @port: PHY port to read from
2885 * @offset: PHY register offset to read
2886 * @val: on return, the contents read from the PHY
2887 *
2888 * Read a PHY register for the given port over the device sideband queue.
2889 */
2890static int
2891ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
2892{
2893 struct ice_sbq_msg_input msg = {0};
2894 int err;
2895
2896 ice_fill_phy_msg_e82x(hw, &msg, port, offset);
2897 msg.opcode = ice_sbq_msg_rd;
2898
2899 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
2900 if (err) {
2901 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2902 err);
2903 return err;
2904 }
2905
2906 *val = msg.data;
2907
2908 return 0;
2909}
2910
2911/**
2912 * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
2913 * @hw: pointer to the HW struct
2914 * @port: PHY port to read from
2915 * @low_addr: offset of the lower register to read from
2916 * @val: on return, the contents of the 64bit value from the PHY registers
2917 *
2918 * Reads the two registers associated with a 64bit value and returns it in the
2919 * val pointer. The offset always specifies the lower register offset to use.
2920 * The high offset is looked up. This function only operates on registers
2921 * known to be two parts of a 64bit value.
2922 */
2923static int
2924ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
2925{
2926 u32 low, high;
2927 u16 high_addr;
2928 int err;
2929
2930 /* Only operate on registers known to be split into two 32bit
2931 * registers.
2932 */
2933 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
2934 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
2935 low_addr);
2936 return -EINVAL;
2937 }
2938
2939 err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
2940 if (err) {
2941		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
2942 low_addr, err);
2943 return err;
2944 }
2945
2946 err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
2947 if (err) {
2948		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
2949 high_addr, err);
2950 return err;
2951 }
2952
2953 *val = (u64)high << 32 | low;
2954
2955 return 0;
2956}
2957
2958/**
2959 * ice_write_phy_reg_e82x - Write a PHY register
2960 * @hw: pointer to the HW struct
2961 * @port: PHY port to write to
2962 * @offset: PHY register offset to write
2963 * @val: The value to write to the register
2964 *
2965 * Write a PHY register for the given port over the device sideband queue.
2966 */
2967static int
2968ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
2969{
2970 struct ice_sbq_msg_input msg = {0};
2971 int err;
2972
2973 ice_fill_phy_msg_e82x(hw, &msg, port, offset);
2974 msg.opcode = ice_sbq_msg_wr;
2975 msg.data = val;
2976
2977 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
2978 if (err) {
2979 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2980 err);
2981 return err;
2982 }
2983
2984 return 0;
2985}
2986
2987/**
2988 * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
2989 * @hw: pointer to the HW struct
2990 * @port: port to write to
2991 * @low_addr: offset of the low register
2992 * @val: 40b value to write
2993 *
2994 * Write the provided 40b value to the two associated registers by splitting
2995 * it up into two chunks, the lower 8 bits and the upper 32 bits.
2996 */
2997static int
2998ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
2999{
3000 u32 low, high;
3001 u16 high_addr;
3002 int err;
3003
3004 /* Only operate on registers known to be split into a lower 8 bit
3005 * register and an upper 32 bit register.
3006 */
3007 if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
3008 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
3009 low_addr);
3010 return -EINVAL;
3011 }
3012 low = FIELD_GET(P_REG_40B_LOW_M, val);
3013 high = (u32)(val >> P_REG_40B_HIGH_S);
3014
3015 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
3016 if (err) {
3017		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
3018 low_addr, err);
3019 return err;
3020 }
3021
3022 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
3023 if (err) {
3024		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
3025 high_addr, err);
3026 return err;
3027 }
3028
3029 return 0;
3030}
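
/* Illustration of the 40-bit split performed above (the low register holds
 * the lower 8 bits and the high register the upper 32 bits, as described in
 * the function comment); the concrete value is hypothetical:
 *
 *   val  = 0x0123456789 (a 40-bit TIMETUS-style value)
 *   low  = 0x89         (FIELD_GET(P_REG_40B_LOW_M, val))
 *   high = 0x01234567   (val >> P_REG_40B_HIGH_S)
 */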
3031
3032/**
3033 * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
3034 * @hw: pointer to the HW struct
3035 * @port: PHY port to read from
3036 * @low_addr: offset of the lower register to read from
3037 * @val: the contents of the 64bit value to write to PHY
3038 *
3039 * Write the 64bit value to the two associated 32bit PHY registers. The offset
3040 * is always specified as the lower register, and the high address is looked
3041 * up. This function only operates on registers known to be two parts of
3042 * a 64bit value.
3043 */
3044static int
3045ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
3046{
3047 u32 low, high;
3048 u16 high_addr;
3049 int err;
3050
3051 /* Only operate on registers known to be split into two 32bit
3052 * registers.
3053 */
3054 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
3055 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
3056 low_addr);
3057 return -EINVAL;
3058 }
3059
3060 low = lower_32_bits(val);
3061 high = upper_32_bits(val);
3062
3063 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
3064 if (err) {
3065		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
3066 low_addr, err);
3067 return err;
3068 }
3069
3070 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
3071 if (err) {
3072		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
3073 high_addr, err);
3074 return err;
3075 }
3076
3077 return 0;
3078}
3079
3080/**
3081 * ice_fill_quad_msg_e82x - Fill message data for quad register access
3082 * @hw: pointer to the HW struct
3083 * @msg: the PHY message buffer to fill in
3084 * @quad: the quad to access
3085 * @offset: the register offset
3086 *
3087 * Fill a message buffer for accessing a register in a quad shared between
3088 * multiple PHYs.
3089 *
3090 * Return:
3091 * * %0 - OK
3092 * * %-EINVAL - invalid quad number
3093 */
3094static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
3095 struct ice_sbq_msg_input *msg, u8 quad,
3096 u16 offset)
3097{
3098 u32 addr;
3099
3100 if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
3101 return -EINVAL;
3102
3103 msg->dest_dev = rmn_0;
3104
3105 if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
3106 addr = Q_0_BASE + offset;
3107 else
3108 addr = Q_1_BASE + offset;
3109
3110 msg->msg_addr_low = lower_16_bits(addr);
3111 msg->msg_addr_high = upper_16_bits(addr);
3112
3113 return 0;
3114}
3115
3116/**
3117 * ice_read_quad_reg_e82x - Read a PHY quad register
3118 * @hw: pointer to the HW struct
3119 * @quad: quad to read from
3120 * @offset: quad register offset to read
3121 * @val: on return, the contents read from the quad
3122 *
3123 * Read a quad register over the device sideband queue. Quad registers are
3124 * shared between multiple PHYs.
3125 */
3126int
3127ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
3128{
3129 struct ice_sbq_msg_input msg = {0};
3130 int err;
3131
3132 err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
3133 if (err)
3134 return err;
3135
3136 msg.opcode = ice_sbq_msg_rd;
3137
3138 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
3139 if (err) {
3140 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
3141 err);
3142 return err;
3143 }
3144
3145 *val = msg.data;
3146
3147 return 0;
3148}
3149
3150/**
3151 * ice_write_quad_reg_e82x - Write a PHY quad register
3152 * @hw: pointer to the HW struct
3153 * @quad: quad to write to
3154 * @offset: quad register offset to write
3155 * @val: The value to write to the register
3156 *
3157 * Write a quad register over the device sideband queue. Quad registers are
3158 * shared between multiple PHYs.
3159 */
3160int
3161ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
3162{
3163 struct ice_sbq_msg_input msg = {0};
3164 int err;
3165
3166 err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
3167 if (err)
3168 return err;
3169
3170 msg.opcode = ice_sbq_msg_wr;
3171 msg.data = val;
3172
3173 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
3174 if (err) {
3175 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
3176 err);
3177 return err;
3178 }
3179
3180 return 0;
3181}
3182
3183/**
3184 * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
3185 * @hw: pointer to the HW struct
3186 * @quad: the quad to read from
3187 * @idx: the timestamp index to read
3188 * @tstamp: on return, the 40bit timestamp value
3189 *
3190 * Read a 40bit timestamp value out of the two associated registers in the
3191 * quad memory block that is shared between the internal PHYs of the E822
3192 * family of devices.
3193 */
3194static int
3195ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
3196{
3197 u16 lo_addr, hi_addr;
3198 u32 lo, hi;
3199 int err;
3200
3201 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
3202 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
3203
3204 err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
3205 if (err) {
3206 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
3207 err);
3208 return err;
3209 }
3210
3211 err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
3212 if (err) {
3213 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
3214 err);
3215 return err;
3216 }
3217
3218 /* For E822 based internal PHYs, the timestamp is reported with the
3219 * lower 8 bits in the low register, and the upper 32 bits in the high
3220 * register.
3221 */
3222 *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
3223
3224 return 0;
3225}
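
/* Illustration of the timestamp reassembly above, assuming the field masks
 * select the lower 8 bits from the low register and the upper 32 bits from
 * the high register as the preceding comment describes:
 *
 *   lo = 0x000000A5, hi = 0x01234567
 *   tstamp = (0x01234567 << 8) | 0xA5 = 0x01234567A5
 *
 * which recovers the full 40-bit timestamp latched by the PHY.
 */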
3226
3227/**
3228 * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
3229 * @hw: pointer to the HW struct
3230 * @quad: the quad to read from
3231 * @idx: the timestamp index to reset
3232 *
3233 * Read the timestamp out of the quad to clear its timestamp status bit from
3234 * the PHY quad block that is shared between the internal PHYs of the E822
3235 * devices.
3236 *
3237 * Note that unlike E810, software cannot directly write to the quad memory
3238 * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
3239 * to determine which timestamps are valid. Reading a timestamp auto-clears
3240 * the valid bit.
3241 *
3242 * To directly clear the contents of the timestamp block entirely, discarding
3243 * all timestamp data at once, software should instead use
3244 * ice_ptp_reset_ts_memory_quad_e82x().
3245 *
3246 * This function should only be called on an idx whose bit is set according to
3247 * ice_get_phy_tx_tstamp_ready().
3248 */
3249static int
3250ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
3251{
3252 u64 unused_tstamp;
3253 int err;
3254
3255 err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
3256 if (err) {
3257 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
3258 quad, idx, err);
3259 return err;
3260 }
3261
3262 return 0;
3263}
3264
3265/**
3266 * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
3267 * @hw: pointer to the HW struct
3268 * @quad: the quad to read from
3269 *
3270 * Clear all timestamps from the PHY quad block that is shared between the
3271 * internal PHYs on the E822 devices.
3272 */
3273void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
3274{
3275 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
3276 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
3277}
3278
3279/**
3280 * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
3281 * @hw: pointer to the HW struct
3282 */
3283static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
3284{
3285 unsigned int quad;
3286
3287 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++)
3288 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
3289}
3290
3291/**
3292 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
3293 * @hw: pointer to the HW struct
3294 *
3295 * Set the window length used for the vernier port calibration process.
3296 */
3297static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
3298{
3299 u8 port;
3300
3301 for (port = 0; port < hw->ptp.num_lports; port++) {
3302 int err;
3303
3304 err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
3305 PTP_VERNIER_WL);
3306 if (err) {
3307 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
3308 port, err);
3309 return err;
3310 }
3311 }
3312
3313 return 0;
3314}
3315
3316/**
3317 * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
3318 * @hw: pointer to HW struct
3319 *
3320 * Perform PHC initialization steps specific to E822 devices.
3321 */
3322static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
3323{
3324 int err;
3325 u32 val;
3326
3327 /* Enable reading switch and PHY registers over the sideband queue */
3328#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
3329#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
3330 val = rd32(hw, PF_SB_REM_DEV_CTL);
3331 val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
3332 wr32(hw, PF_SB_REM_DEV_CTL, val);
3333
3334 /* Initialize the Clock Generation Unit */
3335 err = ice_init_cgu_e82x(hw);
3336 if (err)
3337 return err;
3338
3339 /* Set window length for all the ports */
3340 return ice_ptp_set_vernier_wl(hw);
3341}
3342
3343/**
3344 * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
3345 * @hw: pointer to the HW struct
3346 * @time: Time to initialize the PHY port clocks to
3347 *
3348 * Program the PHY port registers with a new initial time value. The port
3349 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
3350 * command. The time value is the upper 32 bits of the PHY timer, usually in
3351 * units of nominal nanoseconds.
3352 */
3353static int
3354ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
3355{
3356 u64 phy_time;
3357 u8 port;
3358 int err;
3359
3360 /* The time represents the upper 32 bits of the PHY timer, so we need
3361 * to shift to account for this when programming.
3362 */
3363 phy_time = (u64)time << 32;
3364
3365 for (port = 0; port < hw->ptp.num_lports; port++) {
3366 /* Tx case */
3367 err = ice_write_64b_phy_reg_e82x(hw, port,
3368 P_REG_TX_TIMER_INC_PRE_L,
3369 phy_time);
3370 if (err)
3371 goto exit_err;
3372
3373 /* Rx case */
3374 err = ice_write_64b_phy_reg_e82x(hw, port,
3375 P_REG_RX_TIMER_INC_PRE_L,
3376 phy_time);
3377 if (err)
3378 goto exit_err;
3379 }
3380
3381 return 0;
3382
3383exit_err:
3384 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
3385 port, err);
3386
3387 return err;
3388}
3389
3390/**
3391 * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
3392 * @hw: pointer to HW struct
3393 * @port: Port number to be programmed
3394 * @time: time in cycles to adjust the port Tx and Rx clocks
3395 *
3396 * Program the port for an atomic adjustment by writing the Tx and Rx timer
3397 * registers. The atomic adjustment won't be completed until the driver issues
3398 * an ICE_PTP_ADJ_TIME command.
3399 *
3400 * Note that time is not in units of nanoseconds. It is in clock time
3401 * including the lower sub-nanosecond portion of the port timer.
3402 *
3403 * Negative adjustments are supported using 2s complement arithmetic.
3404 */
3405static int
3406ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
3407{
3408 u32 l_time, u_time;
3409 int err;
3410
3411 l_time = lower_32_bits(time);
3412 u_time = upper_32_bits(time);
3413
3414 /* Tx case */
3415 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
3416 l_time);
3417 if (err)
3418 goto exit_err;
3419
3420 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
3421 u_time);
3422 if (err)
3423 goto exit_err;
3424
3425 /* Rx case */
3426 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
3427 l_time);
3428 if (err)
3429 goto exit_err;
3430
3431 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
3432 u_time);
3433 if (err)
3434 goto exit_err;
3435
3436 return 0;
3437
3438exit_err:
3439 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
3440 port, err);
3441 return err;
3442}
3443
3444/**
3445 * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
3446 * @hw: pointer to HW struct
3447 * @adj: adjustment in nanoseconds
3448 *
3449 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
3450 * Tx and Rx port registers. The actual adjustment is completed by issuing an
3451 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
3452 */
3453static int
3454ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
3455{
3456 s64 cycles;
3457 u8 port;
3458
3459 /* The port clock supports adjustment of the sub-nanosecond portion of
3460 * the clock. We shift the provided adjustment in nanoseconds to
3461 * calculate the appropriate adjustment to program into the PHY ports.
3462 */
3463 if (adj > 0)
3464 cycles = (s64)adj << 32;
3465 else
3466 cycles = -(((s64)-adj) << 32);
3467
3468 for (port = 0; port < hw->ptp.num_lports; port++) {
3469 int err;
3470
3471 err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
3472 if (err)
3473 return err;
3474 }
3475
3476 return 0;
3477}
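
/* Note: the positive/negative split above most likely exists to avoid
 * left-shifting a negative signed value, which is undefined behaviour in
 * standard C; negating first and then negating the shifted magnitude yields
 * the same 2s complement bit pattern that ice_ptp_prep_port_adj_e82x()
 * expects. For example, adj = -25 gives
 * cycles = -(25LL << 32) = 0xFFFFFFE700000000.
 */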
3478
3479/**
3480 * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
3481 * @hw: pointer to HW struct
3482 * @incval: new increment value to prepare
3483 *
3484 * Prepare each of the PHY ports for a new increment value by programming the
3485 * port's TIMETUS registers. The new increment value will be updated after
3486 * issuing an ICE_PTP_INIT_INCVAL command.
3487 */
3488static int
3489ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
3490{
3491 int err;
3492 u8 port;
3493
3494 for (port = 0; port < hw->ptp.num_lports; port++) {
3495 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
3496 incval);
3497 if (err)
3498 goto exit_err;
3499 }
3500
3501 return 0;
3502
3503exit_err:
3504 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
3505 port, err);
3506
3507 return err;
3508}
3509
3510/**
3511 * ice_ptp_read_port_capture - Read a port's local time capture
3512 * @hw: pointer to HW struct
3513 * @port: Port number to read
3514 * @tx_ts: on return, the Tx port time capture
3515 * @rx_ts: on return, the Rx port time capture
3516 *
3517 * Read the port's Tx and Rx local time capture values.
3518 *
3519 * Note this has no equivalent for the E810 devices.
3520 */
3521static int
3522ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
3523{
3524 int err;
3525
3526 /* Tx case */
3527 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
3528 if (err) {
3529 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
3530 err);
3531 return err;
3532 }
3533
3534 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
3535 (unsigned long long)*tx_ts);
3536
3537 /* Rx case */
3538 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
3539 if (err) {
3540 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
3541 err);
3542 return err;
3543 }
3544
3545 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
3546 (unsigned long long)*rx_ts);
3547
3548 return 0;
3549}
3550
3551/**
3552 * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
3553 * @hw: pointer to HW struct
3554 * @port: Port to which cmd has to be sent
3555 * @cmd: Command to be sent to the port
3556 *
3557 * Prepare the requested port for an upcoming timer sync command.
3558 *
3559 * Note there is no equivalent of this operation on E810, as that device
3560 * always handles all external PHYs internally.
3561 *
3562 * Return:
3563 * * %0 - success
3564 * * %other - failed to write to PHY
3565 */
3566static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
3567 enum ice_ptp_tmr_cmd cmd)
3568{
3569 u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
3570 int err;
3571
3572 /* Tx case */
3573 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
3574 if (err) {
3575 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
3576 err);
3577 return err;
3578 }
3579
3580 /* Rx case */
3581 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD,
3582 val | TS_CMD_RX_TYPE);
3583 if (err) {
3584 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
3585 err);
3586 return err;
3587 }
3588
3589 return 0;
3590}
3591
3592/* E822 Vernier calibration functions
3593 *
3594 * The following functions are used as part of the vernier calibration of
3595 * a port. This calibration increases the precision of the timestamps on the
3596 * port.
3597 */
3598
3599/**
3600 * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
3601 * @hw: pointer to HW struct
3602 * @port: the port to read from
3603 * @link_out: if non-NULL, holds link speed on success
3604 * @fec_out: if non-NULL, holds FEC algorithm on success
3605 *
3606 * Read the serdes data for the PHY port and extract the link speed and FEC
3607 * algorithm.
3608 */
3609static int
3610ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
3611 enum ice_ptp_link_spd *link_out,
3612 enum ice_ptp_fec_mode *fec_out)
3613{
3614 enum ice_ptp_link_spd link;
3615 enum ice_ptp_fec_mode fec;
3616 u32 serdes;
3617 int err;
3618
3619 err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
3620 if (err) {
3621 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
3622 return err;
3623 }
3624
3625 /* Determine the FEC algorithm */
3626 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
3627
3628 serdes &= P_REG_LINK_SPEED_SERDES_M;
3629
3630 /* Determine the link speed */
3631 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
3632 switch (serdes) {
3633 case ICE_PTP_SERDES_25G:
3634 link = ICE_PTP_LNK_SPD_25G_RS;
3635 break;
3636 case ICE_PTP_SERDES_50G:
3637 link = ICE_PTP_LNK_SPD_50G_RS;
3638 break;
3639 case ICE_PTP_SERDES_100G:
3640 link = ICE_PTP_LNK_SPD_100G_RS;
3641 break;
3642 default:
3643 return -EIO;
3644 }
3645 } else {
3646 switch (serdes) {
3647 case ICE_PTP_SERDES_1G:
3648 link = ICE_PTP_LNK_SPD_1G;
3649 break;
3650 case ICE_PTP_SERDES_10G:
3651 link = ICE_PTP_LNK_SPD_10G;
3652 break;
3653 case ICE_PTP_SERDES_25G:
3654 link = ICE_PTP_LNK_SPD_25G;
3655 break;
3656 case ICE_PTP_SERDES_40G:
3657 link = ICE_PTP_LNK_SPD_40G;
3658 break;
3659 case ICE_PTP_SERDES_50G:
3660 link = ICE_PTP_LNK_SPD_50G;
3661 break;
3662 default:
3663 return -EIO;
3664 }
3665 }
3666
3667 if (link_out)
3668 *link_out = link;
3669 if (fec_out)
3670 *fec_out = fec;
3671
3672 return 0;
3673}
3674
3675/**
3676 * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
3677 * @hw: pointer to HW struct
3678 * @port: to configure the quad for
3679 */
3680static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
3681{
3682 enum ice_ptp_link_spd link_spd;
3683 int err;
3684 u32 val;
3685 u8 quad;
3686
3687 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
3688 if (err) {
3689 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
3690 err);
3691 return;
3692 }
3693
3694 quad = ICE_GET_QUAD_NUM(port);
3695
3696 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
3697 if (err) {
3698 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
3699 err);
3700 return;
3701 }
3702
3703 if (link_spd >= ICE_PTP_LNK_SPD_40G)
3704 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
3705 else
3706 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
3707
3708 err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
3709 if (err) {
3710 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
3711 err);
3712 return;
3713 }
3714}
3715
3716/**
3717 * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
3718 * @hw: pointer to the HW structure
3719 * @port: the port to configure
3720 *
3721 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
3722 * hardware clock time units (TUs). That is, determine the number of TUs per
3723 * serdes unit interval, and program the UIX registers with this conversion.
3724 *
3725 * This conversion is used as part of the calibration process when determining
3726 * the additional error of a timestamp vs the real time of transmission or
3727 * receipt of the packet.
3728 *
3729 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
3730 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
3731 *
3732 * To calculate the conversion ratio, we use the following facts:
3733 *
3734 * a) the clock frequency in Hz (cycles per second)
3735 * b) the number of TUs per cycle (the increment value of the clock)
3736 * c) 1 second per 1 billion nanoseconds
3737 * d) the duration of 66 UIs in nanoseconds
3738 *
3739 * Given these facts, we can use the following table to work out what ratios
3740 * to multiply in order to get the number of TUs per 66 UIs:
3741 *
3742 * cycles | 1 second | incval (TUs) | nanoseconds
3743 * -------+--------------+--------------+-------------
3744 * second | 1 billion ns | cycle | 66 UIs
3745 *
3746 * To perform the multiplication using integers without too much loss of
3747 * precision, we can use the following equation:
3748 *
3749 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
3750 *
3751 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
3752 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
3753 *
3754 * The increment value has a maximum expected range of about 34 bits, while
3755 * the frequency value is about 29 bits. Multiplying these values shouldn't
3756 * overflow the 64 bits. However, we must then further multiply them again by
3757 * the Serdes unit interval duration. To avoid overflow here, we split the
3758 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
3759 * a divide by 390,625,000. This does lose some precision, but avoids
3760 * miscalculation due to arithmetic overflow.
3761 */
3762static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
3763{
3764 u64 cur_freq, clk_incval, tu_per_sec, uix;
3765 int err;
3766
3767 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
3768 clk_incval = ice_ptp_read_src_incval(hw);
3769
3770 /* Calculate TUs per second divided by 256 */
3771 tu_per_sec = (cur_freq * clk_incval) >> 8;
3772
3773#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
3774#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
3775
3776 /* Program the 10Gb/40Gb conversion ratio */
3777 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
3778
3779 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
3780 uix);
3781 if (err) {
3782 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
3783 err);
3784 return err;
3785 }
3786
3787 /* Program the 25Gb/100Gb conversion ratio */
3788 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
3789
3790 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
3791 uix);
3792 if (err) {
3793 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
3794 err);
3795 return err;
3796 }
3797
3798 return 0;
3799}
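
/* Illustrative worked example (not part of the driver logic): assuming the
 * nominal configuration where one TU is 2^-32 ns, tu_per_sec = freq * incval
 * comes out to roughly 2^32 * 1e9 ~= 4.29e18. The 10G/40G ratio computed
 * above then works out as:
 *
 *   tu_per_sec >> 8           ~= 1.678e16
 *   * LINE_UI_10G_40G (640)   ~= 1.074e19  (still within a u64)
 *   / 390,625,000             ~= 2.75e10 TUs per 66 UIs
 *
 * which matches the 6.4 ns duration of 66 UIs expressed in TUs.
 */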
3800
3801/**
3802 * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
3803 * @hw: pointer to the HW struct
3804 * @port: port to configure
3805 *
3806 * Configure the number of TUs for the PAR and PCS clocks used as part of the
3807 * timestamp calibration process. This depends on the link speed, as the PHY
3808 * uses different markers depending on the speed.
3809 *
3810 * 1Gb/10Gb/25Gb:
3811 * - Tx/Rx PAR/PCS markers
3812 *
3813 * 25Gb RS:
3814 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
3815 *
3816 * 40Gb/50Gb:
3817 * - Tx/Rx PAR/PCS markers
3818 * - Rx Deskew PAR/PCS markers
3819 *
3820 * 50G RS and 100G RS:
3821 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
3822 * - Rx Deskew PAR/PCS markers
3823 * - Tx PAR/PCS markers
3824 *
3825 * To calculate the conversion, we use the PHC clock frequency (cycles per
3826 * second), the increment value (TUs per cycle), and the related PHY clock
3827 * frequency to calculate the TUs per unit of the PHY link clock. The
3828 * following table shows how the units convert:
3829 *
3830 * cycles | TUs | second
3831 * -------+-------+--------
3832 * second | cycle | cycles
3833 *
3834 * For each conversion register, look up the appropriate frequency from the
3835 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
3836 * this to the appropriate register, preparing hardware to perform timestamp
3837 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
3838 * in order to calibrate for the internal PHY delays.
3839 *
3840 * Note that the increment value ranges up to ~34 bits, and the clock
3841 * frequency is ~29 bits, so multiplying them together should fit within the
3842 * 64 bit arithmetic.
3843 */
3844static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
3845{
3846 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
3847 enum ice_ptp_link_spd link_spd;
3848 enum ice_ptp_fec_mode fec_mode;
3849 int err;
3850
3851 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
3852 if (err)
3853 return err;
3854
3855 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
3856 clk_incval = ice_ptp_read_src_incval(hw);
3857
3858 /* Calculate TUs per cycle of the PHC clock */
3859 tu_per_sec = cur_freq * clk_incval;
3860
3861 /* For each PHY conversion register, look up the appropriate link
3862 * speed frequency and determine the TUs per that clock's cycle time.
3863 * Split this into a high and low value and then program the
3864 * appropriate register. If that link speed does not use the
3865 * associated register, write zeros to clear it instead.
3866 */
3867
3868 /* P_REG_PAR_TX_TUS */
3869 if (e822_vernier[link_spd].tx_par_clk)
3870 phy_tus = div_u64(tu_per_sec,
3871 e822_vernier[link_spd].tx_par_clk);
3872 else
3873 phy_tus = 0;
3874
3875 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
3876 phy_tus);
3877 if (err)
3878 return err;
3879
3880 /* P_REG_PAR_RX_TUS */
3881 if (e822_vernier[link_spd].rx_par_clk)
3882 phy_tus = div_u64(tu_per_sec,
3883 e822_vernier[link_spd].rx_par_clk);
3884 else
3885 phy_tus = 0;
3886
3887 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
3888 phy_tus);
3889 if (err)
3890 return err;
3891
3892 /* P_REG_PCS_TX_TUS */
3893 if (e822_vernier[link_spd].tx_pcs_clk)
3894 phy_tus = div_u64(tu_per_sec,
3895 e822_vernier[link_spd].tx_pcs_clk);
3896 else
3897 phy_tus = 0;
3898
3899 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
3900 phy_tus);
3901 if (err)
3902 return err;
3903
3904 /* P_REG_PCS_RX_TUS */
3905 if (e822_vernier[link_spd].rx_pcs_clk)
3906 phy_tus = div_u64(tu_per_sec,
3907 e822_vernier[link_spd].rx_pcs_clk);
3908 else
3909 phy_tus = 0;
3910
3911 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
3912 phy_tus);
3913 if (err)
3914 return err;
3915
3916 /* P_REG_DESK_PAR_TX_TUS */
3917 if (e822_vernier[link_spd].tx_desk_rsgb_par)
3918 phy_tus = div_u64(tu_per_sec,
3919 e822_vernier[link_spd].tx_desk_rsgb_par);
3920 else
3921 phy_tus = 0;
3922
3923 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
3924 phy_tus);
3925 if (err)
3926 return err;
3927
3928 /* P_REG_DESK_PAR_RX_TUS */
3929 if (e822_vernier[link_spd].rx_desk_rsgb_par)
3930 phy_tus = div_u64(tu_per_sec,
3931 e822_vernier[link_spd].rx_desk_rsgb_par);
3932 else
3933 phy_tus = 0;
3934
3935 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
3936 phy_tus);
3937 if (err)
3938 return err;
3939
3940 /* P_REG_DESK_PCS_TX_TUS */
3941 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
3942 phy_tus = div_u64(tu_per_sec,
3943 e822_vernier[link_spd].tx_desk_rsgb_pcs);
3944 else
3945 phy_tus = 0;
3946
3947 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
3948 phy_tus);
3949 if (err)
3950 return err;
3951
3952 /* P_REG_DESK_PCS_RX_TUS */
3953 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
3954 phy_tus = div_u64(tu_per_sec,
3955 e822_vernier[link_spd].rx_desk_rsgb_pcs);
3956 else
3957 phy_tus = 0;
3958
3959 return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
3960 phy_tus);
3961}
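
/* Illustrative worked example (not part of the driver logic): each register
 * programmed above holds tu_per_sec / clk, i.e. the period of the given PHY
 * clock expressed in TUs. If, hypothetically, the Rx PCS clock for a speed
 * were the 10GBASE-R 64b/66b block rate of 156.25 MHz (a value assumed here
 * for illustration, not read from the e822_vernier table), then with the
 * nominal tu_per_sec of ~2^32 * 1e9:
 *
 *   phy_tus = 4.29e18 / 156.25e6 ~= 2.75e10 TUs per PCS clock cycle
 *
 * which is just the 6.4 ns block period expressed in TUs.
 */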
3962
3963/**
3964 * ice_calc_fixed_tx_offset_e82x - Calculate the fixed Tx offset for a port
3965 * @hw: pointer to the HW struct
3966 * @link_spd: the Link speed to calculate for
3967 *
3968 * Calculate the fixed offset due to known static latency data.
3969 */
3970static u64
3971ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
3972{
3973 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
3974
3975 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
3976 clk_incval = ice_ptp_read_src_incval(hw);
3977
3978 /* Calculate TUs per second */
3979 tu_per_sec = cur_freq * clk_incval;
3980
3981 /* Calculate number of TUs to add for the fixed Tx latency. Since the
3982 * latency measurement is in 1/100th of a nanosecond, we need to
3983 * multiply by tu_per_sec and then divide by 1e11. This calculation
3984 * overflows 64 bit integer arithmetic, so break it up into two
3985 * divisions by 1e4 first then by 1e7.
3986 */
3987 fixed_offset = div_u64(tu_per_sec, 10000);
3988 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
3989 fixed_offset = div_u64(fixed_offset, 10000000);
3990
3991 return fixed_offset;
3992}
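
/* Illustrative worked example (not part of the driver logic): take a
 * hypothetical tx_fixed_delay of 300, i.e. 3 ns, since the table stores the
 * latency in hundredths of a nanosecond, and the nominal tu_per_sec of
 * ~2^32 * 1e9:
 *
 *   div_u64(4.29e18, 10000)      ~= 4.29e14
 *   * 300                        ~= 1.29e17  (no 64-bit overflow)
 *   div_u64(..., 10000000)       ~= 1.29e10 TUs
 *
 * which equals 3 ns * 2^32, the expected TU representation of a 3 ns delay.
 */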
3993
3994/**
3995 * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
3996 * @hw: pointer to the HW struct
3997 * @port: the PHY port to configure
3998 *
3999 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
4000 * adjust Tx timestamps by. This is calculated by combining some known static
4001 * latency along with the Vernier offset computations done by hardware.
4002 *
4003 * This function will not return successfully until the Tx offset calculations
4004 * have been completed, which requires waiting until at least one packet has
4005 * been transmitted by the device. It is safe to call this function
4006 * periodically until calibration succeeds, as it will only program the offset
4007 * once.
4008 *
4009 * To avoid overflow, when calculating the offset based on the known static
4010 * latency values, we use measurements in 1/100th of a nanosecond, and divide
4011 * the TUs per second up front. This avoids overflow while allowing
4012 * calculation of the adjustment using integer arithmetic.
4013 *
4014 * Returns zero on success, -EBUSY if the hardware vernier offset
4015 * calibration has not completed, or another error code on failure.
4016 */
4017int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
4018{
4019 enum ice_ptp_link_spd link_spd;
4020 enum ice_ptp_fec_mode fec_mode;
4021 u64 total_offset, val;
4022 int err;
4023 u32 reg;
4024
4025 /* Nothing to do if we've already programmed the offset */
4026 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
4027 if (err) {
4028 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
4029 port, err);
4030 return err;
4031 }
4032
4033 if (reg)
4034 return 0;
4035
4036 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
4037 if (err) {
4038 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
4039 port, err);
4040 return err;
4041 }
4042
4043 if (!(reg & P_REG_TX_OV_STATUS_OV_M))
4044 return -EBUSY;
4045
4046 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
4047 if (err)
4048 return err;
4049
4050 total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
4051
4052 /* Read the first Vernier offset from the PHY register and add it to
4053 * the total offset.
4054 */
4055 if (link_spd == ICE_PTP_LNK_SPD_1G ||
4056 link_spd == ICE_PTP_LNK_SPD_10G ||
4057 link_spd == ICE_PTP_LNK_SPD_25G ||
4058 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
4059 link_spd == ICE_PTP_LNK_SPD_40G ||
4060 link_spd == ICE_PTP_LNK_SPD_50G) {
4061 err = ice_read_64b_phy_reg_e82x(hw, port,
4062 P_REG_PAR_PCS_TX_OFFSET_L,
4063 &val);
4064 if (err)
4065 return err;
4066
4067 total_offset += val;
4068 }
4069
4070 /* For Tx, we only need to use the second Vernier offset for
4071 * multi-lane link speeds with RS-FEC. The lanes will always be
4072 * aligned.
4073 */
4074 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
4075 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
4076 err = ice_read_64b_phy_reg_e82x(hw, port,
4077 P_REG_PAR_TX_TIME_L,
4078 &val);
4079 if (err)
4080 return err;
4081
4082 total_offset += val;
4083 }
4084
4085 /* Now that the total offset has been calculated, program it to the
4086 * PHY and indicate that the Tx offset is ready. After this,
4087 * timestamps will be enabled.
4088 */
4089 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
4090 total_offset);
4091 if (err)
4092 return err;
4093
4094 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
4095 if (err)
4096 return err;
4097
4098 dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
4099 port);
4100
4101 return 0;
4102}
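
/* Minimal caller sketch (illustrative only, not taken from the driver):
 * because this function returns -EBUSY until the hardware vernier
 * measurement completes, and programs the offset only once, a caller can
 * simply retry it until it either succeeds or fails with a different error:
 *
 *	err = ice_phy_cfg_tx_offset_e82x(hw, port);
 *	if (err == -EBUSY)
 *		requeue_offset_work(hw, port);	(hypothetical retry helper)
 *	else if (err)
 *		dev_err(ice_hw_to_dev(hw), "Tx offset failed: %d\n", err);
 */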
4103
4104/**
4105 * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
4106 * @hw: pointer to the HW struct
4107 * @port: the PHY port to adjust for
4108 * @link_spd: the current link speed of the PHY
4109 * @fec_mode: the current FEC mode of the PHY
4110 * @pmd_adj: on return, the amount to adjust the Rx total offset by
4111 *
4112 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
4113 * This varies by link speed and FEC mode. The value calculated accounts for
4114 * various delays caused when receiving a packet.
4115 */
4116static int
4117ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
4118 enum ice_ptp_link_spd link_spd,
4119 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
4120{
4121 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
4122 u8 pmd_align;
4123 u32 val;
4124 int err;
4125
4126 err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
4127 if (err) {
4128 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
4129 err);
4130 return err;
4131 }
4132
4133 pmd_align = (u8)val;
4134
4135 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
4136 clk_incval = ice_ptp_read_src_incval(hw);
4137
4138 /* Calculate TUs per second */
4139 tu_per_sec = cur_freq * clk_incval;
4140
4141 /* The PMD alignment adjustment measurement depends on the link speed,
4142 * and whether FEC is enabled. For each link speed, the alignment
4143 * adjustment is calculated by dividing a value by the length of
4144 * a Time Unit in nanoseconds.
4145 *
4146 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
4147 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
4148 * 10G w/FEC: align * 0.1 * 32/33
4149 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
4150 * 25G w/FEC: align * 0.4 * 32/33
4151 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
4152 * 40G w/FEC: align * 0.1 * 32/33
4153 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
4154 * 50G w/FEC: align * 0.8 * 32/33
4155 *
4156 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
4157 *
4158 * To allow for calculating this value using integer arithmetic, we
4159 * instead start with the number of TUs per second (the inverse of the
4160 * length of a Time Unit in nanoseconds), multiply by a value based
4161 * on the PMD alignment register, and then divide by the right value
4162 * calculated based on the table above. To avoid integer overflow this
4163 * division is broken up into a step of dividing by 125 first.
4164 */
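	/* Illustrative derivation (not used by the code): at 10G the table
	 * above gives an adjustment of align * 0.1 ns * 32/33, so the
	 * per-speed divisor applied after the divide by 125 would be expected
	 * to be 1e9 / (125 * 0.1 * 32/33) = 82,500,000. This is derived from
	 * the formula above, not read from the e822_vernier table.
	 */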
4165 if (link_spd == ICE_PTP_LNK_SPD_1G) {
4166 if (pmd_align == 4)
4167 mult = 10;
4168 else
4169 mult = (pmd_align + 6) % 10;
4170 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
4171 link_spd == ICE_PTP_LNK_SPD_25G ||
4172 link_spd == ICE_PTP_LNK_SPD_40G ||
4173 link_spd == ICE_PTP_LNK_SPD_50G) {
4174 /* If Clause 74 FEC, always calculate PMD adjust */
4175 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
4176 mult = pmd_align;
4177 else
4178 mult = 0;
4179 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
4180 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
4181 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
4182 if (pmd_align < 17)
4183 mult = pmd_align + 40;
4184 else
4185 mult = pmd_align;
4186 } else {
4187 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
4188 link_spd);
4189 mult = 0;
4190 }
4191
4192 /* In some cases, there's no need to adjust for the PMD alignment */
4193 if (!mult) {
4194 *pmd_adj = 0;
4195 return 0;
4196 }
4197
4198 /* Calculate the adjustment by multiplying TUs per second by the
4199 * appropriate multiplier and divisor. To avoid overflow, we first
4200 * divide by 125, and then handle remaining divisor based on the link
4201 * speed pmd_adj_divisor value.
4202 */
4203 adj = div_u64(tu_per_sec, 125);
4204 adj *= mult;
4205 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
4206
4207 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
4208 * cycle count is necessary.
4209 */
4210 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
4211 u64 cycle_adj;
4212 u8 rx_cycle;
4213
4214 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
4215 &val);
4216 if (err) {
4217 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
4218 err);
4219 return err;
4220 }
4221
4222 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
4223 if (rx_cycle) {
4224 mult = (4 - rx_cycle) * 40;
4225
4226 cycle_adj = div_u64(tu_per_sec, 125);
4227 cycle_adj *= mult;
4228 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
4229
4230 adj += cycle_adj;
4231 }
4232 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
4233 u64 cycle_adj;
4234 u8 rx_cycle;
4235
4236 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
4237 &val);
4238 if (err) {
4239 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
4240 err);
4241 return err;
4242 }
4243
4244 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
4245 if (rx_cycle) {
4246 mult = rx_cycle * 40;
4247
4248 cycle_adj = div_u64(tu_per_sec, 125);
4249 cycle_adj *= mult;
4250 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
4251
4252 adj += cycle_adj;
4253 }
4254 }
4255
4256 /* Return the calculated adjustment */
4257 *pmd_adj = adj;
4258
4259 return 0;
4260}
4261
4262/**
4263 * ice_calc_fixed_rx_offset_e82x - Calculate the fixed Rx offset for a port
4264 * @hw: pointer to HW struct
4265 * @link_spd: The Link speed to calculate for
4266 *
4267 * Determine the fixed Rx latency for a given link speed.
4268 */
4269static u64
4270ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
4271{
4272 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
4273
4274 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
4275 clk_incval = ice_ptp_read_src_incval(hw);
4276
4277 /* Calculate TUs per second */
4278 tu_per_sec = cur_freq * clk_incval;
4279
4280 /* Calculate number of TUs to add for the fixed Rx latency. Since the
4281 * latency measurement is in 1/100th of a nanosecond, we need to
4282 * multiply by tu_per_sec and then divide by 1e11. This calculation
4283 * overflows 64 bit integer arithmetic, so break it up into two
4284 * divisions by 1e4 first then by 1e7.
4285 */
4286 fixed_offset = div_u64(tu_per_sec, 10000);
4287 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
4288 fixed_offset = div_u64(fixed_offset, 10000000);
4289
4290 return fixed_offset;
4291}
4292
4293/**
4294 * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
4295 * @hw: pointer to the HW struct
4296 * @port: the PHY port to configure
4297 *
4298 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
4299 * adjust Rx timestamps by. This combines calculations from the Vernier offset
4300 * measurements taken in hardware with some data about known fixed delay as
4301 * well as adjusting for multi-lane alignment delay.
4302 *
4303 * This function will not return successfully until the Rx offset calculations
4304 * have been completed, which requires waiting until at least one packet has
4305 * been received by the device. It is safe to call this function periodically
4306 * until calibration succeeds, as it will only program the offset once.
4307 *
4308 * This function must be called only after the offset registers are valid,
4309 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
4310 * has measured the offset.
4311 *
4312 * To avoid overflow, when calculating the offset based on the known static
4313 * latency values, we use measurements in 1/100th of a nanosecond, and divide
4314 * the TUs per second up front. This avoids overflow while allowing
4315 * calculation of the adjustment using integer arithmetic.
4316 *
4317 * Returns zero on success, -EBUSY if the hardware vernier offset
4318 * calibration has not completed, or another error code on failure.
4319 */
4320int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
4321{
4322 enum ice_ptp_link_spd link_spd;
4323 enum ice_ptp_fec_mode fec_mode;
4324 u64 total_offset, pmd, val;
4325 int err;
4326 u32 reg;
4327
4328 /* Nothing to do if we've already programmed the offset */
4329 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
4330 if (err) {
4331 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
4332 port, err);
4333 return err;
4334 }
4335
4336 if (reg)
4337 return 0;
4338
4339 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
4340 if (err) {
4341 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
4342 port, err);
4343 return err;
4344 }
4345
4346 if (!(reg & P_REG_RX_OV_STATUS_OV_M))
4347 return -EBUSY;
4348
4349 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
4350 if (err)
4351 return err;
4352
4353 total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
4354
4355 /* Read the first Vernier offset from the PHY register and add it to
4356 * the total offset.
4357 */
4358 err = ice_read_64b_phy_reg_e82x(hw, port,
4359 P_REG_PAR_PCS_RX_OFFSET_L,
4360 &val);
4361 if (err)
4362 return err;
4363
4364 total_offset += val;
4365
4366 /* For Rx, all multi-lane link speeds include a second Vernier
4367 * calibration, because the lanes might not be aligned.
4368 */
4369 if (link_spd == ICE_PTP_LNK_SPD_40G ||
4370 link_spd == ICE_PTP_LNK_SPD_50G ||
4371 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
4372 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
4373 err = ice_read_64b_phy_reg_e82x(hw, port,
4374 P_REG_PAR_RX_TIME_L,
4375 &val);
4376 if (err)
4377 return err;
4378
4379 total_offset += val;
4380 }
4381
4382 /* In addition, Rx must account for the PMD alignment */
4383 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
4384 if (err)
4385 return err;
4386
4387 /* For RS-FEC, this adjustment adds delay, but for other modes, it
4388 * subtracts delay.
4389 */
4390 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
4391 total_offset += pmd;
4392 else
4393 total_offset -= pmd;
4394
4395 /* Now that the total offset has been calculated, program it to the
4396 * PHY and indicate that the Rx offset is ready. After this,
4397 * timestamps will be enabled.
4398 */
4399 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
4400 total_offset);
4401 if (err)
4402 return err;
4403
4404 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
4405 if (err)
4406 return err;
4407
4408 dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
4409 port);
4410
4411 return 0;
4412}
4413
4414/**
4415 * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers
4416 * @hw: pointer to the HW struct
4417 *
4418 * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted
4419 * and received timestamps as invalid.
4420 *
4421 * Return: 0 on success, other error codes when failed to write to PHY
4422 */
4423int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw)
4424{
4425 u8 port;
4426
4427 for (port = 0; port < hw->ptp.num_lports; port++) {
4428 int err;
4429
4430 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
4431 if (err) {
4432 dev_warn(ice_hw_to_dev(hw),
4433 "Failed to clear PHY TX_OFFSET_READY register\n");
4434 return err;
4435 }
4436
4437 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
4438 if (err) {
4439 dev_warn(ice_hw_to_dev(hw),
4440 "Failed to clear PHY RX_OFFSET_READY register\n");
4441 return err;
4442 }
4443 }
4444
4445 return 0;
4446}
4447
4448/**
4449 * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
4450 * @hw: pointer to the HW struct
4451 * @port: the PHY port to read
4452 * @phy_time: on return, the 64bit PHY timer value
4453 * @phc_time: on return, the lower 64bits of PHC time
4454 *
4455 * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
4456 * and PHC timer values.
4457 */
4458static int
4459ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
4460 u64 *phc_time)
4461{
4462 u64 tx_time, rx_time;
4463 u32 zo, lo;
4464 u8 tmr_idx;
4465 int err;
4466
4467 tmr_idx = ice_get_ptp_src_clock_index(hw);
4468
4469 /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
4470 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
4471
4472 /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
4473 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
4474 if (err)
4475 return err;
4476
4477 /* Issue the sync to start the ICE_PTP_READ_TIME capture */
4478 ice_ptp_exec_tmr_cmd(hw);
4479
4480 /* Read the captured PHC time from the shadow time registers */
4481 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
4482 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
4483 *phc_time = (u64)lo << 32 | zo;
4484
4485 /* Read the captured PHY time from the PHY shadow registers */
4486 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
4487 if (err)
4488 return err;
4489
4490 /* If the PHY Tx and Rx timers don't match, log a warning message.
4491 * Note that this should not happen in normal circumstances since the
4492 * driver always programs them together.
4493 */
4494 if (tx_time != rx_time)
4495 dev_warn(ice_hw_to_dev(hw),
4496 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
4497 port, (unsigned long long)tx_time,
4498 (unsigned long long)rx_time);
4499
4500 *phy_time = tx_time;
4501
4502 return 0;
4503}
4504
4505/**
4506 * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
4507 * @hw: pointer to the HW struct
4508 * @port: the PHY port to synchronize
4509 *
4510 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
4511 * This is done by issuing a ICE_PTP_READ_TIME command which triggers a
4512 * simultaneous read of the PHY timer and PHC timer. Then we use the
4513 * difference to calculate an appropriate 2s complement addition to add
4514 * to the PHY timer in order to ensure it reads the same value as the
4515 * primary PHC timer.
4516 */
4517static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
4518{
4519 u64 phc_time, phy_time, difference;
4520 int err;
4521
4522 if (!ice_ptp_lock(hw)) {
4523 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
4524 return -EBUSY;
4525 }
4526
4527 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
4528 if (err)
4529 goto err_unlock;
4530
4531 /* Calculate the amount required to add to the port time in order for
4532 * it to match the PHC time.
4533 *
4534 * Note that the port adjustment is done using 2s complement
4535 * arithmetic. This is convenient since it means that we can simply
4536 * calculate the difference between the PHC time and the port time,
4537 * and it will be interpreted correctly.
4538 */
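	/* Worked example (illustrative): if the PHY timer is ahead of the PHC
	 * by 5 TUs, phc_time - phy_time wraps to 0xFFFFFFFFFFFFFFFB, which the
	 * port adjustment logic interprets as the signed value -5 and pulls
	 * the PHY timer back into line.
	 */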
4539 difference = phc_time - phy_time;
4540
4541 err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
4542 if (err)
4543 goto err_unlock;
4544
4545 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
4546 if (err)
4547 goto err_unlock;
4548
4549 /* Do not perform any action on the main timer */
4550 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
4551
4552 /* Issue the sync to activate the time adjustment */
4553 ice_ptp_exec_tmr_cmd(hw);
4554
4555 /* Re-capture the timer values to flush the command registers and
4556 * verify that the time was properly adjusted.
4557 */
4558 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
4559 if (err)
4560 goto err_unlock;
4561
4562 dev_info(ice_hw_to_dev(hw),
4563 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
4564 port, (unsigned long long)phy_time,
4565 (unsigned long long)phc_time);
4566
4567 ice_ptp_unlock(hw);
4568
4569 return 0;
4570
4571err_unlock:
4572 ice_ptp_unlock(hw);
4573 return err;
4574}
4575
4576/**
4577 * ice_stop_phy_timer_e82x - Stop the PHY clock timer
4578 * @hw: pointer to the HW struct
4579 * @port: the PHY port to stop
4580 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
4581 *
4582 * Stop the clock of a PHY port. This must be done as part of the flow to
4583 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
4584 * initialized or when link speed changes.
4585 */
4586int
4587ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
4588{
4589 int err;
4590 u32 val;
4591
4592 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
4593 if (err)
4594 return err;
4595
4596 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
4597 if (err)
4598 return err;
4599
4600 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
4601 if (err)
4602 return err;
4603
4604 val &= ~P_REG_PS_START_M;
4605 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4606 if (err)
4607 return err;
4608
4609 val &= ~P_REG_PS_ENA_CLK_M;
4610 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4611 if (err)
4612 return err;
4613
4614 if (soft_reset) {
4615 val |= P_REG_PS_SFT_RESET_M;
4616 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4617 if (err)
4618 return err;
4619 }
4620
4621 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
4622
4623 return 0;
4624}
4625
4626/**
4627 * ice_start_phy_timer_e82x - Start the PHY clock timer
4628 * @hw: pointer to the HW struct
4629 * @port: the PHY port to start
4630 *
4631 * Start the clock of a PHY port. This must be done as part of the flow to
4632 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
4633 * initialized or when link speed changes.
4634 *
4635 * Hardware will take Vernier measurements on Tx or Rx of packets.
4636 */
4637int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
4638{
4639 u32 lo, hi, val;
4640 u64 incval;
4641 u8 tmr_idx;
4642 int err;
4643
4644 tmr_idx = ice_get_ptp_src_clock_index(hw);
4645
4646 err = ice_stop_phy_timer_e82x(hw, port, false);
4647 if (err)
4648 return err;
4649
4650 ice_phy_cfg_lane_e82x(hw, port);
4651
4652 err = ice_phy_cfg_uix_e82x(hw, port);
4653 if (err)
4654 return err;
4655
4656 err = ice_phy_cfg_parpcs_e82x(hw, port);
4657 if (err)
4658 return err;
4659
4660 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
4661 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
4662 incval = (u64)hi << 32 | lo;
4663
4664 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
4665 if (err)
4666 return err;
4667
4668 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
4669 if (err)
4670 return err;
4671
4672 /* Do not perform any action on the main timer */
4673 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
4674
4675 ice_ptp_exec_tmr_cmd(hw);
4676
4677 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
4678 if (err)
4679 return err;
4680
4681 val |= P_REG_PS_SFT_RESET_M;
4682 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4683 if (err)
4684 return err;
4685
4686 val |= P_REG_PS_START_M;
4687 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4688 if (err)
4689 return err;
4690
4691 val &= ~P_REG_PS_SFT_RESET_M;
4692 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4693 if (err)
4694 return err;
4695
4696 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
4697 if (err)
4698 return err;
4699
4700 ice_ptp_exec_tmr_cmd(hw);
4701
4702 val |= P_REG_PS_ENA_CLK_M;
4703 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4704 if (err)
4705 return err;
4706
4707 val |= P_REG_PS_LOAD_OFFSET_M;
4708 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
4709 if (err)
4710 return err;
4711
4712 ice_ptp_exec_tmr_cmd(hw);
4713
4714 err = ice_sync_phy_timer_e82x(hw, port);
4715 if (err)
4716 return err;
4717
4718 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
4719
4720 return 0;
4721}
4722
4723/**
4724 * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
4725 * @hw: pointer to the HW struct
4726 * @quad: the timestamp quad to read from
4727 * @tstamp_ready: contents of the Tx memory status register
4728 *
4729 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
4730 * the PHY are ready. A set bit means the corresponding timestamp is valid and
4731 * ready to be captured from the PHY timestamp block.
4732 */
4733static int
4734ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
4735{
4736 u32 hi, lo;
4737 int err;
4738
4739 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
4740 if (err) {
4741 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
4742 quad, err);
4743 return err;
4744 }
4745
4746 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
4747 if (err) {
4748 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
4749 quad, err);
4750 return err;
4751 }
4752
4753 *tstamp_ready = (u64)hi << 32 | (u64)lo;
4754
4755 return 0;
4756}
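
/* For example (illustrative only), a returned tstamp_ready of 0x5 means the
 * timestamps at indexes 0 and 2 of this quad's memory block are valid and may
 * be read, while all other indexes hold no new timestamp.
 */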
4757
4758/**
4759 * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
4760 * @hw: pointer to the HW struct
4761 * @quad: the timestamp quad
4762 * @ena: enable or disable interrupt
4763 * @threshold: interrupt threshold
4764 *
4765 * Configure TX timestamp interrupt for the specified quad
4766 *
4767 * Return: 0 on success, other error codes when failed to read/write quad
4768 */
4770int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
4771{
4772 int err;
4773 u32 val;
4774
4775 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
4776 if (err)
4777 return err;
4778
4779 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
4780 if (ena) {
4781 val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
4782 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
4783 val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold);
4784 }
4785
4786 return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
4787}
4788
4789/**
4790 * ice_ptp_init_phy_e82x - initialize PHY parameters
4791 * @ptp: pointer to the PTP HW struct
4792 */
4793static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
4794{
4795 ptp->phy_model = ICE_PHY_E82X;
4796 ptp->num_lports = 8;
4797 ptp->ports_per_phy = 8;
4798}
4799
4800/* E810 functions
4801 *
4802 * The following functions operate on the E810 series devices which use
4803 * a separate external PHY.
4804 */
4805
4806/**
4807 * ice_read_phy_reg_e810 - Read register from external PHY on E810
4808 * @hw: pointer to the HW struct
4809 * @addr: the address to read from
4810 * @val: On return, the value read from the PHY
4811 *
4812 * Read a register from the external PHY on the E810 device.
4813 */
4814static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
4815{
4816 struct ice_sbq_msg_input msg = {0};
4817 int err;
4818
4819 msg.msg_addr_low = lower_16_bits(addr);
4820 msg.msg_addr_high = upper_16_bits(addr);
4821 msg.opcode = ice_sbq_msg_rd;
4822 msg.dest_dev = rmn_0;
4823
4824 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
4825 if (err) {
4826 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
4827 err);
4828 return err;
4829 }
4830
4831 *val = msg.data;
4832
4833 return 0;
4834}
4835
4836/**
4837 * ice_write_phy_reg_e810 - Write register on external PHY on E810
4838 * @hw: pointer to the HW struct
4839 * @addr: the address to write to
4840 * @val: the value to write to the PHY
4841 *
4842 * Write a value to a register of the external PHY on the E810 device.
4843 */
4844static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
4845{
4846 struct ice_sbq_msg_input msg = {0};
4847 int err;
4848
4849 msg.msg_addr_low = lower_16_bits(addr);
4850 msg.msg_addr_high = upper_16_bits(addr);
4851 msg.opcode = ice_sbq_msg_wr;
4852 msg.dest_dev = rmn_0;
4853 msg.data = val;
4854
4855 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
4856 if (err) {
4857 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
4858 err);
4859 return err;
4860 }
4861
4862 return 0;
4863}
4864
4865/**
4866 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
4867 * @hw: pointer to the HW struct
4868 * @idx: the timestamp index to read
4869 * @hi: 8 bit timestamp high value
4870 * @lo: 32 bit timestamp low value
4871 *
4872 * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
4873 * timestamp block of the external PHY on the E810 device using the low latency
4874 * timestamp read.
4875 */
4876static int
4877ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
4878{
4879 u32 val;
4880 u8 i;
4881
4882 /* Write TS index to read to the PF register so the FW can read it */
4883 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
4884 wr32(hw, PF_SB_ATQBAL, val);
4885
4886 /* Read the register repeatedly until the FW provides us the TS */
4887 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
4888 val = rd32(hw, PF_SB_ATQBAL);
4889
4890 /* When the bit is cleared, the TS is ready in the register */
4891 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
4892 /* The high 8 bits of the TS are in bits 16:23 */
4893 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
4894
4895 /* Read the low 32 bit value and set the TS valid bit */
4896 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
4897 return 0;
4898 }
4899
4900 udelay(10);
4901 }
4902
4903 /* FW failed to provide the TS in time */
4904 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
4905 return -EINVAL;
4906}
4907
4908/**
4909 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
4910 * @hw: pointer to the HW struct
4911 * @lport: the lport to read from
4912 * @idx: the timestamp index to read
4913 * @hi: 8 bit timestamp high value
4914 * @lo: 32 bit timestamp low value
4915 *
4916 * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
4917 * timestamp block of the external PHY on the E810 device using sideband queue.
4918 */
4919static int
4920ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
4921 u32 *lo)
4922{
4923 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
4924 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
4925 u32 lo_val, hi_val;
4926 int err;
4927
4928 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
4929 if (err) {
4930 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
4931 err);
4932 return err;
4933 }
4934
4935 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
4936 if (err) {
4937 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
4938 err);
4939 return err;
4940 }
4941
4942 *lo = lo_val;
4943 *hi = (u8)hi_val;
4944
4945 return 0;
4946}
4947
4948/**
4949 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
4950 * @hw: pointer to the HW struct
4951 * @lport: the lport to read from
4952 * @idx: the timestamp index to read
4953 * @tstamp: on return, the 40bit timestamp value
4954 *
4955 * Read a 40bit timestamp value out of the timestamp block of the external PHY
4956 * on the E810 device.
4957 */
4958static int
4959ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
4960{
4961 u32 lo = 0;
4962 u8 hi = 0;
4963 int err;
4964
4965 if (hw->dev_caps.ts_dev_info.ts_ll_read)
4966 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
4967 else
4968 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
4969
4970 if (err)
4971 return err;
4972
4973 /* For E810 devices, the timestamp is reported with the lower 32 bits
4974 * in the low register, and the upper 8 bits in the high register.
4975 */
4976 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
4977
4978 return 0;
4979}
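
/* Layout sketch (illustrative; assumes TS_HIGH_S is 32 and TS_LOW_M masks the
 * low 32 bits, as the comment above implies): hi = 0x12 and lo = 0x89ABCDEF
 * combine into the 40-bit timestamp 0x1289ABCDEF.
 */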
4980
4981/**
4982 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
4983 * @hw: pointer to the HW struct
4984 * @lport: the lport to read from
4985 * @idx: the timestamp index to reset
4986 *
4987 * Read the timestamp and then forcibly overwrite its value to clear the valid
4988 * bit from the timestamp block of the external PHY on the E810 device.
4989 *
4990 * This function should only be called on an idx whose bit is set according to
4991 * ice_get_phy_tx_tstamp_ready().
4992 */
4993static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
4994{
4995 u32 lo_addr, hi_addr;
4996 u64 unused_tstamp;
4997 int err;
4998
4999 err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
5000 if (err) {
5001 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
5002 lport, idx, err);
5003 return err;
5004 }
5005
5006 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
5007 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
5008
5009 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
5010 if (err) {
5011 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
5012 lport, idx, err);
5013 return err;
5014 }
5015
5016 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
5017 if (err) {
5018 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
5019 lport, idx, err);
5020 return err;
5021 }
5022
5023 return 0;
5024}
5025
5026/**
5027 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
5028 * @hw: pointer to HW struct
5029 *
5030 * Perform E810-specific PTP hardware clock initialization steps.
5031 *
5032 * Return: 0 on success, other error codes when failed to initialize TimeSync
5033 */
5034static int ice_ptp_init_phc_e810(struct ice_hw *hw)
5035{
5036 u8 tmr_idx;
5037 int err;
5038
5039 /* Ensure synchronization delay is zero */
5040 wr32(hw, GLTSYN_SYNC_DLAY, 0);
5041
5042 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5043 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
5044 GLTSYN_ENA_TSYN_ENA_M);
5045 if (err)
5046 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
5047 err);
5048
5049 return err;
5050}
5051
5052/**
5053 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
5054 * @hw: pointer to the HW struct
5055 * @time: Time to initialize the PHY port clock to
5056 *
5057 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting
5058 * the initial clock time. The time will not actually be programmed until the
5059 * driver issues an ICE_PTP_INIT_TIME command.
5060 *
5061 * The time value is the upper 32 bits of the PHY timer, usually in units of
5062 * nominal nanoseconds.
5063 */
5064static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
5065{
5066 u8 tmr_idx;
5067 int err;
5068
5069 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5070 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
5071 if (err) {
5072 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
5073 err);
5074 return err;
5075 }
5076
5077 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
5078 if (err) {
5079 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
5080 err);
5081 return err;
5082 }
5083
5084 return 0;
5085}
5086
5087/**
5088 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
5089 * @hw: pointer to HW struct
5090 * @adj: adjustment value to program
5091 *
5092 * Prepare the PHY port for an atomic adjustment by programming the PHY
5093 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
5094 * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
5095 *
5096 * The adjustment value only contains the portion used for the upper 32 bits of
5097 * the PHY timer, usually in units of nominal nanoseconds. Negative
5098 * adjustments are supported using 2s complement arithmetic.
5099 */
5100static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
5101{
5102 u8 tmr_idx;
5103 int err;
5104
5105 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5106
5107 /* Adjustments are represented as signed 2's complement values in
5108 * nanoseconds. Sub-nanosecond adjustment is not supported.
5109 */
5110 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
5111 if (err) {
5112 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
5113 err);
5114 return err;
5115 }
5116
5117 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
5118 if (err) {
5119 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
5120 err);
5121 return err;
5122 }
5123
5124 return 0;
5125}
5126
5127/**
5128 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
5129 * @hw: pointer to HW struct
5130 * @incval: The new 40bit increment value to prepare
5131 *
5132 * Prepare the PHY port for a new increment value by programming the PHY
5133 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
5134 * completed by issuing an ICE_PTP_INIT_INCVAL command.
5135 */
5136static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
5137{
5138 u32 high, low;
5139 u8 tmr_idx;
5140 int err;
5141
5142 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5143 low = lower_32_bits(incval);
5144 high = upper_32_bits(incval);
5145
5146 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
5147 if (err) {
5148 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
5149 err);
5150 return err;
5151 }
5152
5153 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
5154 if (err) {
5155 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_H, err %d\n",
5156 err);
5157 return err;
5158 }
5159
5160 return 0;
5161}
5162
5163/**
5164 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
5165 * @hw: pointer to HW struct
5166 * @cmd: Command to be sent to the port
5167 *
5168 * Prepare the external PHYs connected to this device for a timer sync
5169 * command.
5170 */
5171static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
5172{
5173 u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
5174
5175 return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val);
5176}
5177
5178/**
5179 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
5180 * @hw: pointer to the HW struct
5181 * @port: the PHY port to read
5182 * @tstamp_ready: contents of the Tx memory status register
5183 *
5184 * E810 devices do not use a Tx memory status register. Instead, simply
5185 * indicate that all timestamps are currently ready.
5186 */
5187static int
5188ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
5189{
5190 *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
5191 return 0;
5192}
5193
5194/* E810 SMA functions
5195 *
5196 * The following functions operate specifically on E810 hardware and are used
5197 * to access the extended GPIOs available.
5198 */
5199
5200/**
5201 * ice_get_pca9575_handle - find the handle of the PCA9575 GPIO expander
5202 * @hw: pointer to the hw struct
5203 * @pca9575_handle: GPIO controller's handle
5204 *
5205 * Find and return the GPIO controller's handle in the netlist.
5206 * When found - the value will be cached in the hw structure and following calls
5207 * will return cached value
5208 */
5209static int
5210ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
5211{
5212 struct ice_aqc_get_link_topo *cmd;
5213 struct ice_aq_desc desc;
5214 int status;
5215 u8 idx;
5216
5217 /* If handle was read previously return cached value */
5218 if (hw->io_expander_handle) {
5219 *pca9575_handle = hw->io_expander_handle;
5220 return 0;
5221 }
5222
5223 /* If handle was not detected read it from the netlist */
5224 cmd = &desc.params.get_link_topo;
5225 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
5226
5227 /* Set node type to GPIO controller */
5228 cmd->addr.topo_params.node_type_ctx =
5229 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
5230 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
5231
5232#define SW_PCA9575_SFP_TOPO_IDX 2
5233#define SW_PCA9575_QSFP_TOPO_IDX 1
5234
5235 /* Check if the SW IO expander controlling SMA exists in the netlist. */
5236 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
5237 idx = SW_PCA9575_SFP_TOPO_IDX;
5238 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
5239 idx = SW_PCA9575_QSFP_TOPO_IDX;
5240 else
5241 return -EOPNOTSUPP;
5242
5243 cmd->addr.topo_params.index = idx;
5244
5245 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5246 if (status)
5247 return -EOPNOTSUPP;
5248
5249 /* Verify if we found the right IO expander type */
5250 if (desc.params.get_link_topo.node_part_num !=
5251 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
5252 return -EOPNOTSUPP;
5253
5254 /* If present save the handle and return it */
5255 hw->io_expander_handle =
5256 le16_to_cpu(desc.params.get_link_topo.addr.handle);
5257 *pca9575_handle = hw->io_expander_handle;
5258
5259 return 0;
5260}
5261
5262/**
5263 * ice_read_sma_ctrl - read the SMA controller state
5264 * @hw: pointer to the hw struct
5265 * @data: pointer to data to be read from the GPIO controller
5266 *
5267 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
5268 * PCA9575 expander, so only bits 3-7 in data are valid.
5269 */
5270int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data)
5271{
5272 int status;
5273 u16 handle;
5274 u8 i;
5275
5276 status = ice_get_pca9575_handle(hw, &handle);
5277 if (status)
5278 return status;
5279
5280 *data = 0;
5281
5282 for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
5283 bool pin;
5284
5285 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
5286 &pin, NULL);
5287 if (status)
5288 break;
5289 *data |= (u8)(!pin) << i;
5290 }
5291
5292 return status;
5293}
5294
5295/**
5296 * ice_write_sma_ctrl - write to the SMA controller
5297 * @hw: pointer to the hw struct
5298 * @data: data to be written to the GPIO controller
5299 *
5300 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
5301 * of the PCA9575 expander, so only bits 3-7 in data are valid.
5302 */
5303int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
5304{
5305 int status;
5306 u16 handle;
5307 u8 i;
5308
5309 status = ice_get_pca9575_handle(hw, &handle);
5310 if (status)
5311 return status;
5312
5313 for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
5314 bool pin;
5315
5316 pin = !(data & (1 << i));
5317 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
5318 pin, NULL);
5319 if (status)
5320 break;
5321 }
5322
5323 return status;
5324}
5325
5326/**
5327 * ice_read_pca9575_reg - read a register from the GPIO controller
5328 * @hw: pointer to the hw struct
5329 * @offset: GPIO controller register offset
5330 * @data: pointer to data to be read from the GPIO controller
5331 *
5332 * Read the register from the GPIO controller
5333 */
5334int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
5335{
5336 struct ice_aqc_link_topo_addr link_topo;
5337 __le16 addr;
5338 u16 handle;
5339 int err;
5340
5341 memset(&link_topo, 0, sizeof(link_topo));
5342
5343 err = ice_get_pca9575_handle(hw, &handle);
5344 if (err)
5345 return err;
5346
5347 link_topo.handle = cpu_to_le16(handle);
5348 link_topo.topo_params.node_type_ctx =
5349 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
5350 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
5351
5352 addr = cpu_to_le16((u16)offset);
5353
5354 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
5355}
5356
5357/**
5358 * ice_ptp_read_sdp_ac - read SDP available connections section from NVM
5359 * @hw: pointer to the HW struct
5360 * @entries: returns the SDP available connections section from NVM
5361 * @num_entries: returns the number of valid entries
5362 *
5363 * Return: 0 on success, negative error code if NVM read failed or section does
5364 * not exist or is corrupted
5365 */
5366int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries)
5367{
5368 __le16 data;
5369 u32 offset;
5370 int err;
5371
5372 err = ice_acquire_nvm(hw, ICE_RES_READ);
5373 if (err)
5374 goto exit;
5375
5376 /* Read the offset of SDP_AC */
5377 offset = ICE_AQC_NVM_SDP_AC_PTR_OFFSET;
5378 err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
5379 NULL);
5380 if (err)
5381 goto exit;
5382
5383 /* Check if the section exists */
5384 offset = FIELD_GET(ICE_AQC_NVM_SDP_AC_PTR_M, le16_to_cpu(data));
5385 if (offset == ICE_AQC_NVM_SDP_AC_PTR_INVAL) {
5386 err = -EINVAL;
5387 goto exit;
5388 }
5389
5390 if (offset & ICE_AQC_NVM_SDP_AC_PTR_TYPE_M) {
5391 offset &= ICE_AQC_NVM_SDP_AC_PTR_M;
5392 offset *= ICE_AQC_NVM_SECTOR_UNIT;
5393 } else {
5394 offset *= sizeof(data);
5395 }
5396
5397 /* Skip reading section length and read the number of valid entries */
5398 offset += sizeof(data);
5399 err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
5400 NULL);
5401 if (err)
5402 goto exit;
5403 *num_entries = le16_to_cpu(data);
5404
5405 /* Read SDP configuration section */
5406 offset += sizeof(data);
5407 err = ice_aq_read_nvm(hw, 0, offset, *num_entries * sizeof(data),
5408 entries, false, true, NULL);
5409
5410exit:
5411 if (err)
5412 dev_dbg(ice_hw_to_dev(hw), "Failed to read SDP connection section\n");
5413 ice_release_nvm(hw);
5414 return err;
5415}
5416
5417/**
5418 * ice_ptp_init_phy_e810 - initialize PHY parameters
5419 * @ptp: pointer to the PTP HW struct
5420 */
5421static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
5422{
5423 ptp->phy_model = ICE_PHY_E810;
5424 ptp->num_lports = 8;
5425 ptp->ports_per_phy = 4;
5426}
5427
5428/* Device agnostic functions
5429 *
5430 * The following functions implement shared behavior common to both E822 and
5431 * E810 devices, possibly calling a device specific implementation where
5432 * necessary.
5433 */
5434
5435/**
5436 * ice_ptp_lock - Acquire PTP global semaphore register lock
5437 * @hw: pointer to the HW struct
5438 *
5439 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
5440 * was acquired, false otherwise.
5441 *
5442 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
5443 * value. If software sees the busy bit cleared, this means that this function
5444 * acquired the lock (and the busy bit is now set). If software sees the busy
5445 * bit set, it means that another function acquired the lock.
5446 *
5447 * Software must clear the busy bit with a write to release the lock for other
5448 * functions when done.
5449 */
5450bool ice_ptp_lock(struct ice_hw *hw)
5451{
5452 u32 hw_lock;
5453 int i;
5454
5455#define MAX_TRIES 15
5456
5457 for (i = 0; i < MAX_TRIES; i++) {
5458 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
5459 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
5460 if (hw_lock) {
5461 /* Somebody is holding the lock */
5462 usleep_range(5000, 6000);
5463 continue;
5464 }
5465
5466 break;
5467 }
5468
5469 return !hw_lock;
5470}
5471
5472/**
5473 * ice_ptp_unlock - Release PTP global semaphore register lock
5474 * @hw: pointer to the HW struct
5475 *
5476 * Release the global PTP hardware semaphore lock. This is done by writing to
5477 * the PFTSYN_SEM register.
5478 */
5479void ice_ptp_unlock(struct ice_hw *hw)
5480{
5481 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
5482}
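
/* Typical usage sketch (illustrative; see ice_sync_phy_timer_e82x() above for
 * a real caller): take the lock before touching the shared timer registers
 * and release it on every exit path.
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	... program shadow registers and issue timer commands ...
 *	ice_ptp_unlock(hw);
 */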
5483
5484/**
5485 * ice_ptp_init_hw - Initialize hw based on device type
5486 * @hw: pointer to the HW structure
5487 *
5488 * Determine the PHY model for the device, and initialize hw
5489 * for use by other functions.
5490 */
5491void ice_ptp_init_hw(struct ice_hw *hw)
5492{
5493 struct ice_ptp_hw *ptp = &hw->ptp;
5494
5495 if (ice_is_e822(hw) || ice_is_e823(hw))
5496 ice_ptp_init_phy_e82x(ptp);
5497 else if (ice_is_e810(hw))
5498 ice_ptp_init_phy_e810(ptp);
5499 else if (ice_is_e825c(hw))
5500 ice_ptp_init_phy_e825(hw);
5501 else
5502 ptp->phy_model = ICE_PHY_UNSUP;
5503}
5504
5505/**
5506 * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command
5507 * @hw: pointer to HW struct
5508 * @port: Port to which cmd has to be sent
5509 * @cmd: Command to be sent to the port
5510 *
5511 * Prepare one port for the upcoming timer sync command. Do not use this for
5512 * programming only a single port; instead, use ice_ptp_one_port_cmd() to
5513 * ensure non-modified ports get properly initialized to ICE_PTP_NOP.
5514 *
5515 * Return:
5516 * * %0 - success
5517 * * %-EOPNOTSUPP - PHY model not supported
5518 * * %other - failed to write port command
5519 */
5520static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
5521 enum ice_ptp_tmr_cmd cmd)
5522{
5523 switch (ice_get_phy_model(hw)) {
5524 case ICE_PHY_ETH56G:
5525 return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
5526 case ICE_PHY_E82X:
5527 return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
5528 default:
5529 return -EOPNOTSUPP;
5530 }
5531}
5532
5533/**
5534 * ice_ptp_one_port_cmd - Program one PHY port for a timer command
5535 * @hw: pointer to HW struct
5536 * @configured_port: the port that should execute the command
5537 * @configured_cmd: the command to be executed on the configured port
5538 *
5539 * Prepare one port for executing a timer command, while preparing all other
5540 * ports to ICE_PTP_NOP. This allows executing a command on a single port
5541 * while ensuring all other ports do not execute stale commands.
5542 *
5543 * Return:
5544 * * %0 - success
5545 * * %other - failed to write port command
5546 */
5547int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
5548 enum ice_ptp_tmr_cmd configured_cmd)
5549{
5550 u32 port;
5551
5552 for (port = 0; port < hw->ptp.num_lports; port++) {
5553 int err;
5554
5555 /* Program the configured port with the configured command,
5556 * program all other ports with ICE_PTP_NOP.
5557 */
5558 if (port == configured_port)
5559 err = ice_ptp_write_port_cmd(hw, port, configured_cmd);
5560 else
5561 err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP);
5562
5563 if (err)
5564 return err;
5565 }
5566
5567 return 0;
5568}
5569
5570/**
5571 * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command
5572 * @hw: pointer to HW struct
5573 * @cmd: the timer command to setup
5574 *
5575 * Prepare all PHY ports on this device for the requested timer command. For
5576 * some families this can be done in one shot, but for other families each
5577 * port must be configured individually.
5578 *
5579 * Return:
5580 * * %0 - success
5581 * * %other - failed to write port command
5582 */
5583static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
5584{
5585 u32 port;
5586
5587 /* PHY models which can program all ports simultaneously */
5588 switch (ice_get_phy_model(hw)) {
5589 case ICE_PHY_E810:
5590 return ice_ptp_port_cmd_e810(hw, cmd);
5591 default:
5592 break;
5593 }
5594
5595 /* PHY models which require programming each port separately */
5596 for (port = 0; port < hw->ptp.num_lports; port++) {
5597 int err;
5598
5599 err = ice_ptp_write_port_cmd(hw, port, cmd);
5600 if (err)
5601 return err;
5602 }
5603
5604 return 0;
5605}
5606
5607/**
5608 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
5609 * @hw: pointer to HW struct
5610 * @cmd: the command to issue
5611 *
5612 * Prepare the source timer and PHY timers and then trigger the requested
5613 * command. This causes the shadow registers previously written in preparation
5614 * for the command to be synchronously applied to both the source and PHY
5615 * timers.
5616 */
5617static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
5618{
5619 int err;
5620
5621 /* First, prepare the source timer */
5622 ice_ptp_src_cmd(hw, cmd);
5623
5624 /* Next, prepare the ports */
5625 err = ice_ptp_port_cmd(hw, cmd);
5626 if (err) {
5627 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
5628 cmd, err);
5629 return err;
5630 }
5631
5632 /* Write the sync command register to drive both source and PHY timer
5633 * commands synchronously
5634 */
5635 ice_ptp_exec_tmr_cmd(hw);
5636
5637 return 0;
5638}
5639
5640/**
5641 * ice_ptp_init_time - Initialize device time to provided value
5642 * @hw: pointer to HW struct
5643 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
5644 *
 5645 * Initialize the device to the provided time. This requires a three-step
 5646 * process:
5647 *
5648 * 1) write the new init time to the source timer shadow registers
5649 * 2) write the new init time to the PHY timer shadow registers
5650 * 3) issue an init_time timer command to synchronously switch both the source
5651 * and port timers to the new init time value at the next clock cycle.
5652 */
5653int ice_ptp_init_time(struct ice_hw *hw, u64 time)
5654{
5655 u8 tmr_idx;
5656 int err;
5657
5658 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5659
5660 /* Source timers */
5661 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
5662 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
5663 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
5664
5665 /* PHY timers */
5666 /* Fill Rx and Tx ports and send msg to PHY */
5667 switch (ice_get_phy_model(hw)) {
5668 case ICE_PHY_ETH56G:
5669 err = ice_ptp_prep_phy_time_eth56g(hw,
5670 (u32)(time & 0xFFFFFFFF));
5671 break;
5672 case ICE_PHY_E810:
5673 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
5674 break;
5675 case ICE_PHY_E82X:
5676 err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
5677 break;
5678 default:
5679 err = -EOPNOTSUPP;
5680 }
5681
5682 if (err)
5683 return err;
5684
5685 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
5686}
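
/* Illustrative sketch (not part of the driver): deriving the 64-bit @time
 * argument for ice_ptp_init_time(). GLTSYN_TIME_L/H hold a nanosecond count,
 * so a struct timespec64 can be flattened with timespec64_to_ns() and written
 * under the PTP semaphore. 'hw' and 'ts' are assumed to be in scope.
 *
 *	u64 ns = timespec64_to_ns(&ts);
 *	int err;
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	err = ice_ptp_init_time(hw, ns);
 *	ice_ptp_unlock(hw);
 */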
5687
5688/**
5689 * ice_ptp_write_incval - Program PHC with new increment value
5690 * @hw: pointer to HW struct
5691 * @incval: Source timer increment value per clock cycle
5692 *
5693 * Program the PHC with a new increment value. This requires a three-step
5694 * process:
5695 *
5696 * 1) Write the increment value to the source timer shadow registers
5697 * 2) Write the increment value to the PHY timer shadow registers
5698 * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
5699 * the source and port timers to the new increment value at the next clock
5700 * cycle.
5701 */
5702int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
5703{
5704 u8 tmr_idx;
5705 int err;
5706
5707 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5708
5709 /* Shadow Adjust */
5710 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
5711 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
5712
5713 switch (ice_get_phy_model(hw)) {
5714 case ICE_PHY_ETH56G:
5715 err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
5716 break;
5717 case ICE_PHY_E810:
5718 err = ice_ptp_prep_phy_incval_e810(hw, incval);
5719 break;
5720 case ICE_PHY_E82X:
5721 err = ice_ptp_prep_phy_incval_e82x(hw, incval);
5722 break;
5723 default:
5724 err = -EOPNOTSUPP;
5725 }
5726
5727 if (err)
5728 return err;
5729
5730 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
5731}
5732
5733/**
5734 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
5735 * @hw: pointer to HW struct
5736 * @incval: Source timer increment value per clock cycle
5737 *
5738 * Program a new PHC incval while holding the PTP semaphore.
5739 */
5740int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
5741{
5742 int err;
5743
5744 if (!ice_ptp_lock(hw))
5745 return -EBUSY;
5746
5747 err = ice_ptp_write_incval(hw, incval);
5748
5749 ice_ptp_unlock(hw);
5750
5751 return err;
5752}
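
/* Illustrative sketch (not part of the driver): one way a caller could scale
 * a nominal increment value by a signed parts-per-billion adjustment before
 * programming it with ice_ptp_write_incval_locked(). The 'base_incval'
 * variable and the plain ppb math are assumptions of this sketch; the in-tree
 * frequency adjustment path uses its own fixed-point arithmetic.
 *
 *	u64 incval = base_incval;
 *	u64 diff = mul_u64_u64_div_u64(incval, abs(ppb), 1000000000ULL);
 *
 *	if (ppb < 0)
 *		incval -= diff;
 *	else
 *		incval += diff;
 *
 *	err = ice_ptp_write_incval_locked(hw, incval);
 */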
5753
5754/**
5755 * ice_ptp_adj_clock - Adjust PHC clock time atomically
5756 * @hw: pointer to HW struct
5757 * @adj: Adjustment in nanoseconds
5758 *
5759 * Perform an atomic adjustment of the PHC time by the specified number of
5760 * nanoseconds. This requires a three-step process:
5761 *
5762 * 1) Write the adjustment to the source timer shadow registers
5763 * 2) Write the adjustment to the PHY timer shadow registers
5764 * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
5765 * adjustment to both the source and port timers at the next clock cycle.
5766 */
5767int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
5768{
5769 u8 tmr_idx;
5770 int err;
5771
5772 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5773
5774 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
5775 * For an ICE_PTP_ADJ_TIME command, this set of registers represents
5776 * the value to add to the clock time. It supports subtraction by
5777 * interpreting the value as a 2's complement integer.
5778 */
5779 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
5780 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
5781
5782 switch (ice_get_phy_model(hw)) {
5783 case ICE_PHY_ETH56G:
5784 err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
5785 break;
5786 case ICE_PHY_E810:
5787 err = ice_ptp_prep_phy_adj_e810(hw, adj);
5788 break;
5789 case ICE_PHY_E82X:
5790 err = ice_ptp_prep_phy_adj_e82x(hw, adj);
5791 break;
5792 default:
5793 err = -EOPNOTSUPP;
5794 }
5795
5796 if (err)
5797 return err;
5798
5799 return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
5800}
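
/* Illustrative sketch (not part of the driver): because the adjustment is
 * interpreted as a 2's complement value, the same helper steps the clock in
 * either direction. For example, stepping back by 500 us and then forward by
 * 1 ms ('hw' assumed to be in scope, error handling elided):
 *
 *	err = ice_ptp_adj_clock(hw, -500000);
 *	err = ice_ptp_adj_clock(hw, 1000000);
 */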
5801
5802/**
 5803 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
5804 * @hw: pointer to the HW struct
5805 * @block: the block to read from
5806 * @idx: the timestamp index to read
5807 * @tstamp: on return, the 40bit timestamp value
5808 *
5809 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
5810 * the block is the quad to read from. For E810 devices, the block is the
5811 * logical port to read from.
5812 */
5813int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
5814{
5815 switch (ice_get_phy_model(hw)) {
5816 case ICE_PHY_ETH56G:
5817 return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
5818 case ICE_PHY_E810:
5819 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
5820 case ICE_PHY_E82X:
5821 return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
5822 default:
5823 return -EOPNOTSUPP;
5824 }
5825}
5826
5827/**
5828 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
5829 * @hw: pointer to the HW struct
5830 * @block: the block to read from
5831 * @idx: the timestamp index to reset
5832 *
5833 * Clear a timestamp from the timestamp block, discarding its value without
5834 * returning it. This resets the memory status bit for the timestamp index
5835 * allowing it to be reused for another timestamp in the future.
5836 *
5837 * For E822 devices, the block number is the PHY quad to clear from. For E810
5838 * devices, the block number is the logical port to clear from.
5839 *
5840 * This function must only be called on a timestamp index whose valid bit is
5841 * set according to ice_get_phy_tx_tstamp_ready().
5842 */
5843int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
5844{
5845 switch (ice_get_phy_model(hw)) {
5846 case ICE_PHY_ETH56G:
5847 return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
5848 case ICE_PHY_E810:
5849 return ice_clear_phy_tstamp_e810(hw, block, idx);
5850 case ICE_PHY_E82X:
5851 return ice_clear_phy_tstamp_e82x(hw, block, idx);
5852 default:
5853 return -EOPNOTSUPP;
5854 }
5855}
5856
5857/**
5858 * ice_get_pf_c827_idx - find and return the C827 index for the current pf
5859 * @hw: pointer to the hw struct
5860 * @idx: index of the found C827 PHY
5861 * Return:
5862 * * 0 - success
5863 * * negative - failure
5864 */
5865static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
5866{
5867 struct ice_aqc_get_link_topo cmd;
5868 u8 node_part_number;
5869 u16 node_handle;
5870 int status;
5871 u8 ctx;
5872
5873 if (hw->mac_type != ICE_MAC_E810)
5874 return -ENODEV;
5875
5876 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
5877 *idx = C827_0;
5878 return 0;
5879 }
5880
5881 memset(&cmd, 0, sizeof(cmd));
5882
5883 ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
5884 ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
5885 cmd.addr.topo_params.node_type_ctx = ctx;
5886
5887 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
5888 &node_handle);
5889 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
5890 return -ENOENT;
5891
5892 if (node_handle == E810C_QSFP_C827_0_HANDLE)
5893 *idx = C827_0;
5894 else if (node_handle == E810C_QSFP_C827_1_HANDLE)
5895 *idx = C827_1;
5896 else
5897 return -EIO;
5898
5899 return 0;
5900}
5901
5902/**
5903 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
5904 * @hw: pointer to the HW struct
5905 */
5906void ice_ptp_reset_ts_memory(struct ice_hw *hw)
5907{
5908 switch (ice_get_phy_model(hw)) {
5909 case ICE_PHY_ETH56G:
5910 ice_ptp_reset_ts_memory_eth56g(hw);
5911 break;
5912 case ICE_PHY_E82X:
5913 ice_ptp_reset_ts_memory_e82x(hw);
5914 break;
5915 case ICE_PHY_E810:
5916 default:
5917 return;
5918 }
5919}
5920
5921/**
5922 * ice_ptp_init_phc - Initialize PTP hardware clock
5923 * @hw: pointer to the HW struct
5924 *
5925 * Perform the steps required to initialize the PTP hardware clock.
5926 */
5927int ice_ptp_init_phc(struct ice_hw *hw)
5928{
5929 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
5930
5931 /* Enable source clocks */
5932 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
5933
5934 /* Clear event err indications for auxiliary pins */
5935 (void)rd32(hw, GLTSYN_STAT(src_idx));
5936
5937 switch (ice_get_phy_model(hw)) {
5938 case ICE_PHY_ETH56G:
5939 return ice_ptp_init_phc_eth56g(hw);
5940 case ICE_PHY_E810:
5941 return ice_ptp_init_phc_e810(hw);
5942 case ICE_PHY_E82X:
5943 return ice_ptp_init_phc_e82x(hw);
5944 default:
5945 return -EOPNOTSUPP;
5946 }
5947}
5948
5949/**
5950 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
5951 * @hw: pointer to the HW struct
5952 * @block: the timestamp block to check
5953 * @tstamp_ready: storage for the PHY Tx memory status information
5954 *
5955 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
 5956 * which indicates which timestamps in the block have been captured. A set bit
5957 * means the timestamp can be read. An unset bit means the timestamp is not
5958 * ready and software should avoid reading the register.
5959 */
5960int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
5961{
5962 switch (ice_get_phy_model(hw)) {
5963 case ICE_PHY_ETH56G:
5964 return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
5965 tstamp_ready);
5966 case ICE_PHY_E810:
5967 return ice_get_phy_tx_tstamp_ready_e810(hw, block,
5968 tstamp_ready);
5969 case ICE_PHY_E82X:
5970 return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
5971 tstamp_ready);
5973 default:
5974 return -EOPNOTSUPP;
5975 }
5976}
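
/* Illustrative sketch (not part of the driver): draining the ready Tx
 * timestamps of one block with the helpers above. Each set bit in the
 * returned bitmap identifies an index that can be read and then cleared;
 * consume_tstamp() is a hypothetical consumer and 'hw'/'block' are assumed
 * to be in scope.
 *
 *	u64 tstamp_ready, tstamp;
 *	u8 idx;
 *	int err;
 *
 *	err = ice_get_phy_tx_tstamp_ready(hw, block, &tstamp_ready);
 *	if (err)
 *		return err;
 *
 *	while (tstamp_ready) {
 *		idx = __ffs64(tstamp_ready);
 *
 *		if (!ice_read_phy_tstamp(hw, block, idx, &tstamp))
 *			consume_tstamp(idx, tstamp);
 *
 *		ice_clear_phy_tstamp(hw, block, idx);
 *		tstamp_ready &= ~BIT_ULL(idx);
 *	}
 */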
5977
5978/**
5979 * ice_cgu_get_pin_desc_e823 - get pin description array
5980 * @hw: pointer to the hw struct
5981 * @input: if request is done against input or output pin
5982 * @size: number of inputs/outputs
5983 *
5984 * Return: pointer to pin description array associated to given hw.
5985 */
5986static const struct ice_cgu_pin_desc *
5987ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
5988{
5989 static const struct ice_cgu_pin_desc *t;
5990
5991 if (hw->cgu_part_number ==
5992 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
5993 if (input) {
5994 t = ice_e823_zl_cgu_inputs;
5995 *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
5996 } else {
5997 t = ice_e823_zl_cgu_outputs;
5998 *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
5999 }
6000 } else if (hw->cgu_part_number ==
6001 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
6002 if (input) {
6003 t = ice_e823_si_cgu_inputs;
6004 *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
6005 } else {
6006 t = ice_e823_si_cgu_outputs;
6007 *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
6008 }
6009 } else {
6010 t = NULL;
6011 *size = 0;
6012 }
6013
6014 return t;
6015}
6016
6017/**
6018 * ice_cgu_get_pin_desc - get pin description array
6019 * @hw: pointer to the hw struct
6020 * @input: if request is done against input or output pins
6021 * @size: size of array returned by function
6022 *
6023 * Return: pointer to pin description array associated to given hw.
6024 */
6025static const struct ice_cgu_pin_desc *
6026ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
6027{
6028 const struct ice_cgu_pin_desc *t = NULL;
6029
6030 switch (hw->device_id) {
6031 case ICE_DEV_ID_E810C_SFP:
6032 if (input) {
6033 t = ice_e810t_sfp_cgu_inputs;
6034 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
6035 } else {
6036 t = ice_e810t_sfp_cgu_outputs;
6037 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
6038 }
6039 break;
6040 case ICE_DEV_ID_E810C_QSFP:
6041 if (input) {
6042 t = ice_e810t_qsfp_cgu_inputs;
6043 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
6044 } else {
6045 t = ice_e810t_qsfp_cgu_outputs;
6046 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
6047 }
6048 break;
6049 case ICE_DEV_ID_E823L_10G_BASE_T:
6050 case ICE_DEV_ID_E823L_1GBE:
6051 case ICE_DEV_ID_E823L_BACKPLANE:
6052 case ICE_DEV_ID_E823L_QSFP:
6053 case ICE_DEV_ID_E823L_SFP:
6054 case ICE_DEV_ID_E823C_10G_BASE_T:
6055 case ICE_DEV_ID_E823C_BACKPLANE:
6056 case ICE_DEV_ID_E823C_QSFP:
6057 case ICE_DEV_ID_E823C_SFP:
6058 case ICE_DEV_ID_E823C_SGMII:
6059 t = ice_cgu_get_pin_desc_e823(hw, input, size);
6060 break;
6061 default:
6062 break;
6063 }
6064
6065 return t;
6066}
6067
6068/**
6069 * ice_cgu_get_num_pins - get pin description array size
6070 * @hw: pointer to the hw struct
6071 * @input: if request is done against input or output pins
6072 *
6073 * Return: size of pin description array for given hw.
6074 */
6075int ice_cgu_get_num_pins(struct ice_hw *hw, bool input)
6076{
6077 const struct ice_cgu_pin_desc *t;
6078 int size;
6079
6080 t = ice_cgu_get_pin_desc(hw, input, &size);
6081 if (t)
6082 return size;
6083
6084 return 0;
6085}
6086
6087/**
6088 * ice_cgu_get_pin_type - get pin's type
6089 * @hw: pointer to the hw struct
6090 * @pin: pin index
6091 * @input: if request is done against input or output pin
6092 *
6093 * Return: type of a pin.
6094 */
6095enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
6096{
6097 const struct ice_cgu_pin_desc *t;
6098 int t_size;
6099
6100 t = ice_cgu_get_pin_desc(hw, input, &t_size);
6101
6102 if (!t)
6103 return 0;
6104
6105 if (pin >= t_size)
6106 return 0;
6107
6108 return t[pin].type;
6109}
6110
6111/**
6112 * ice_cgu_get_pin_freq_supp - get pin's supported frequency
6113 * @hw: pointer to the hw struct
6114 * @pin: pin index
6115 * @input: if request is done against input or output pin
6116 * @num: output number of supported frequencies
6117 *
 6118 * Get the number of supported frequencies and the array that lists them.
6119 *
6120 * Return: array of supported frequencies for given pin.
6121 */
6122struct dpll_pin_frequency *
6123ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
6124{
6125 const struct ice_cgu_pin_desc *t;
6126 int t_size;
6127
6128 *num = 0;
6129 t = ice_cgu_get_pin_desc(hw, input, &t_size);
6130 if (!t)
6131 return NULL;
6132 if (pin >= t_size)
6133 return NULL;
6134 *num = t[pin].freq_supp_num;
6135
6136 return t[pin].freq_supp;
6137}
6138
6139/**
6140 * ice_cgu_get_pin_name - get pin's name
6141 * @hw: pointer to the hw struct
6142 * @pin: pin index
6143 * @input: if request is done against input or output pin
6144 *
6145 * Return:
 6146 * * null-terminated string with the pin name
6147 * * NULL in case of failure
6148 */
6149const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
6150{
6151 const struct ice_cgu_pin_desc *t;
6152 int t_size;
6153
6154 t = ice_cgu_get_pin_desc(hw, input, &t_size);
6155
6156 if (!t)
6157 return NULL;
6158
6159 if (pin >= t_size)
6160 return NULL;
6161
6162 return t[pin].name;
6163}
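
/* Illustrative sketch (not part of the driver): enumerating the CGU input
 * pins of a device with the accessors above, for example when registering
 * them with the DPLL subsystem. Output pins are walked the same way with
 * input == false; 'hw' is assumed to be in scope.
 *
 *	int i, num = ice_cgu_get_num_pins(hw, true);
 *
 *	for (i = 0; i < num; i++) {
 *		const char *name = ice_cgu_get_pin_name(hw, i, true);
 *		enum dpll_pin_type type = ice_cgu_get_pin_type(hw, i, true);
 *		struct dpll_pin_frequency *freq_supp;
 *		u8 freq_num;
 *
 *		freq_supp = ice_cgu_get_pin_freq_supp(hw, i, true, &freq_num);
 *	}
 */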
6164
6165/**
6166 * ice_get_cgu_state - get the state of the DPLL
6167 * @hw: pointer to the hw struct
6168 * @dpll_idx: Index of internal DPLL unit
6169 * @last_dpll_state: last known state of DPLL
6170 * @pin: pointer to a buffer for returning currently active pin
6171 * @ref_state: reference clock state
6172 * @eec_mode: eec mode of the DPLL
6173 * @phase_offset: pointer to a buffer for returning phase offset
6174 * @dpll_state: state of the DPLL (output)
6175 *
 6176 * Read the state of the DPLL referenced by dpll_idx. The non-NULL 'pin',
 6177 * 'ref_state', 'eec_mode' and 'phase_offset' parameters return the currently
 6178 * active pin, reference state, EEC mode and phase offset; the DPLL lock
 6179 * status is returned through 'dpll_state'.
 6180 * Return: 0 on success, negative error code otherwise.
6181 */
6182int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
6183 enum dpll_lock_status last_dpll_state, u8 *pin,
6184 u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
6185 enum dpll_lock_status *dpll_state)
6186{
6187 u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
6188 s64 hw_phase_offset;
6189 int status;
6190
6191 status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
6192 &hw_dpll_state, &hw_config,
6193 &hw_phase_offset, &hw_eec_mode);
6194 if (status)
6195 return status;
6196
6197 if (pin)
6198 /* current ref pin in dpll_state_refsel_status_X register */
6199 *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
6200 if (phase_offset)
6201 *phase_offset = hw_phase_offset;
6202 if (ref_state)
6203 *ref_state = hw_ref_state;
6204 if (eec_mode)
6205 *eec_mode = hw_eec_mode;
6206 if (!dpll_state)
6207 return 0;
6208
 6209 /* According to the ZL DPLL documentation, once the state reaches
 6210 * LOCKED_HO_ACQ it should never return to FREERUN. This aligns with the
 6211 * ITU-T G.781 Recommendation. We cannot report HOLDOVER because the HO
 6212 * memory is cleared while switching to another reference.
 6213 * Only when the previous state was either "LOCKED without HO_ACQ" or
 6214 * "HOLDOVER" do we actually fall back to FREERUN.
6215 */
6216 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
6217 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
6218 *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
6219 else
6220 *dpll_state = DPLL_LOCK_STATUS_LOCKED;
6221 } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
6222 last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
6223 *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
6224 } else {
6225 *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
6226 }
6227
6228 return 0;
6229}
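
/* Illustrative sketch (not part of the driver): polling the first DPLL and
 * tracking only its lock status. Outputs that are not needed may be passed
 * as NULL, and the previously reported state has to be fed back in so the
 * holdover logic above can be applied. 'hw' and 'prev_state' are assumed to
 * be in scope.
 *
 *	enum dpll_lock_status state;
 *	u8 pin;
 *	int err;
 *
 *	err = ice_get_cgu_state(hw, 0, prev_state, &pin, NULL, NULL, NULL,
 *				&state);
 *	if (!err)
 *		prev_state = state;
 */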
6230
6231/**
6232 * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
6233 * @hw: pointer to the hw struct
6234 * @base_idx: returns index of first recovered clock pin on device
6235 * @pin_num: returns number of recovered clock pins available on device
6236 *
 6237 * Based on the hw, provide the caller with information about the recovered
 6238 * clock pins available on the board.
6239 *
6240 * Return:
6241 * * 0 - success, information is valid
6242 * * negative - failure, information is not valid
6243 */
6244int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
6245{
6246 u8 phy_idx;
6247 int ret;
6248
6249 switch (hw->device_id) {
6250 case ICE_DEV_ID_E810C_SFP:
6251 case ICE_DEV_ID_E810C_QSFP:
6252
6253 ret = ice_get_pf_c827_idx(hw, &phy_idx);
6254 if (ret)
6255 return ret;
6256 *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
6257 *pin_num = ICE_E810_RCLK_PINS_NUM;
6258 ret = 0;
6259 break;
6260 case ICE_DEV_ID_E823L_10G_BASE_T:
6261 case ICE_DEV_ID_E823L_1GBE:
6262 case ICE_DEV_ID_E823L_BACKPLANE:
6263 case ICE_DEV_ID_E823L_QSFP:
6264 case ICE_DEV_ID_E823L_SFP:
6265 case ICE_DEV_ID_E823C_10G_BASE_T:
6266 case ICE_DEV_ID_E823C_BACKPLANE:
6267 case ICE_DEV_ID_E823C_QSFP:
6268 case ICE_DEV_ID_E823C_SFP:
6269 case ICE_DEV_ID_E823C_SGMII:
6270 *pin_num = ICE_E82X_RCLK_PINS_NUM;
6271 ret = 0;
6272 if (hw->cgu_part_number ==
6273 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
6274 *base_idx = ZL_REF1P;
6275 else if (hw->cgu_part_number ==
6276 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
6277 *base_idx = SI_REF1P;
6278 else
6279 ret = -ENODEV;
6280
6281 break;
6282 default:
6283 ret = -ENODEV;
6284 break;
6285 }
6286
6287 return ret;
6288}
6289
6290/**
6291 * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
6292 * @hw: pointer to the hw struct
6293 * @pin_id: id of a pin
6294 * @caps: capabilities to modify
6295 *
6296 * Return:
6297 * * 0 - success, state capabilities were modified
6298 * * negative - failure, capabilities were not modified
6299 */
6300int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
6301 unsigned long *caps)
6302{
6303 bool can_change = true;
6304
6305 switch (hw->device_id) {
6306 case ICE_DEV_ID_E810C_SFP:
6307 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
6308 can_change = false;
6309 break;
6310 case ICE_DEV_ID_E810C_QSFP:
6311 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
6312 can_change = false;
6313 break;
6314 case ICE_DEV_ID_E823L_10G_BASE_T:
6315 case ICE_DEV_ID_E823L_1GBE:
6316 case ICE_DEV_ID_E823L_BACKPLANE:
6317 case ICE_DEV_ID_E823L_QSFP:
6318 case ICE_DEV_ID_E823L_SFP:
6319 case ICE_DEV_ID_E823C_10G_BASE_T:
6320 case ICE_DEV_ID_E823C_BACKPLANE:
6321 case ICE_DEV_ID_E823C_QSFP:
6322 case ICE_DEV_ID_E823C_SFP:
6323 case ICE_DEV_ID_E823C_SGMII:
6324 if (hw->cgu_part_number ==
6325 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
6326 pin_id == ZL_OUT2)
6327 can_change = false;
6328 else if (hw->cgu_part_number ==
6329 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
6330 pin_id == SI_OUT1)
6331 can_change = false;
6332 break;
6333 default:
6334 return -EINVAL;
6335 }
6336 if (can_change)
6337 *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
6338 else
6339 *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
6340
6341 return 0;
6342}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4#include <linux/delay.h>
5#include "ice_common.h"
6#include "ice_ptp_hw.h"
7#include "ice_ptp_consts.h"
8#include "ice_cgu_regs.h"
9
10static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
11 DPLL_PIN_FREQUENCY_1PPS,
12 DPLL_PIN_FREQUENCY_10MHZ,
13};
14
15static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
16 DPLL_PIN_FREQUENCY_1PPS,
17};
18
19static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
20 DPLL_PIN_FREQUENCY_10MHZ,
21};
22
23static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
24 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
25 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
26 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
27 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
28 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
29 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
30 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
31 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
32 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
33 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
34 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
35 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
36 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
37};
38
39static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
40 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
41 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
42 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
43 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
44 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
45 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
46 { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
47 { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
48 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
49 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
50 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
51 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
52 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
53 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
54 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
55};
56
57static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
58 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
59 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
60 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
61 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
62 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
63 { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
64 { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
65 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
66 { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
67 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
68};
69
70static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
71 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
72 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
73 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
74 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
75 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
76 { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
77 { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
78 { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
79 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
80 { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
81 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
82};
83
84static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
85 { "NONE", SI_REF0P, 0, 0 },
86 { "NONE", SI_REF0N, 0, 0 },
87 { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
88 { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
89 { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
90 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
91 { "NONE", SI_REF2N, 0, 0 },
92 { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
93 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
94 { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
95 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
96};
97
98static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
99 { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
100 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
101 { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
102 { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
103 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
104 { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
105 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
106};
107
108static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
109 { "NONE", ZL_REF0P, 0, 0 },
110 { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
111 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
112 { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
113 { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
114 { "NONE", ZL_REF2P, 0, 0 },
115 { "NONE", ZL_REF2N, 0, 0 },
116 { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
117 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
118 { "NONE", ZL_REF3N, 0, 0 },
119 { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
120 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
121 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
122};
123
124static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
125 { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
126 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
127 { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
128 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
129 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
130 { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
131 { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
132 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
133 { "NONE", ZL_OUT5, 0, 0 },
134};
135
136/* Low level functions for interacting with and managing the device clock used
137 * for the Precision Time Protocol.
138 *
139 * The ice hardware represents the current time using three registers:
140 *
141 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
142 * +---------------+ +---------------+ +---------------+
143 * | 32 bits | | 32 bits | | 32 bits |
144 * +---------------+ +---------------+ +---------------+
145 *
146 * The registers are incremented every clock tick using a 40bit increment
147 * value defined over two registers:
148 *
149 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
150 * +---------------+ +---------------+
 151 * | 8 bits | | 32 bits |
152 * +---------------+ +---------------+
153 *
 154 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
155 * registers every clock source tick. Depending on the specific device
156 * configuration, the clock source frequency could be one of a number of
157 * values.
158 *
159 * For E810 devices, the increment frequency is 812.5 MHz
160 *
161 * For E822 devices the clock can be derived from different sources, and the
162 * increment has an effective frequency of one of the following:
163 * - 823.4375 MHz
164 * - 783.36 MHz
165 * - 796.875 MHz
166 * - 816 MHz
167 * - 830.078125 MHz
168 * - 783.36 MHz
169 *
170 * The hardware captures timestamps in the PHY for incoming packets, and for
171 * outgoing packets on request. To support this, the PHY maintains a timer
172 * that matches the lower 64 bits of the global source timer.
173 *
174 * In order to ensure that the PHY timers and the source timer are equivalent,
175 * shadow registers are used to prepare the desired initial values. A special
176 * sync command is issued to trigger copying from the shadow registers into
177 * the appropriate source and PHY registers simultaneously.
178 *
179 * The driver supports devices which have different PHYs with subtly different
180 * mechanisms to program and control the timers. We divide the devices into
181 * families named after the first major device, E810 and similar devices, and
182 * E822 and similar devices.
183 *
184 * - E822 based devices have additional support for fine grained Vernier
185 * calibration which requires significant setup
186 * - The layout of timestamp data in the PHY register blocks is different
187 * - The way timer synchronization commands are issued is different.
188 *
189 * To support this, very low level functions have an e810 or e822 suffix
190 * indicating what type of device they work on. Higher level abstractions for
191 * tasks that can be done on both devices do not have the suffix and will
192 * correctly look up the appropriate low level function when running.
193 *
194 * Functions which only make sense on a single device family may not have
195 * a suitable generic implementation
196 */
197
198/**
199 * ice_get_ptp_src_clock_index - determine source clock index
200 * @hw: pointer to HW struct
201 *
202 * Determine the source clock index currently in use, based on device
203 * capabilities reported during initialization.
204 */
205u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
206{
207 return hw->func_caps.ts_func_info.tmr_index_assoc;
208}
209
210/**
211 * ice_ptp_read_src_incval - Read source timer increment value
212 * @hw: pointer to HW struct
213 *
214 * Read the increment value of the source timer and return it.
215 */
216static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
217{
218 u32 lo, hi;
219 u8 tmr_idx;
220
221 tmr_idx = ice_get_ptp_src_clock_index(hw);
222
223 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
224 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
225
226 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
227}
228
229/**
230 * ice_ptp_src_cmd - Prepare source timer for a timer command
231 * @hw: pointer to HW structure
232 * @cmd: Timer command
233 *
234 * Prepare the source timer for an upcoming timer sync command.
235 */
236void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
237{
238 u32 cmd_val;
239 u8 tmr_idx;
240
241 tmr_idx = ice_get_ptp_src_clock_index(hw);
242 cmd_val = tmr_idx << SEL_CPK_SRC;
243
244 switch (cmd) {
245 case ICE_PTP_INIT_TIME:
246 cmd_val |= GLTSYN_CMD_INIT_TIME;
247 break;
248 case ICE_PTP_INIT_INCVAL:
249 cmd_val |= GLTSYN_CMD_INIT_INCVAL;
250 break;
251 case ICE_PTP_ADJ_TIME:
252 cmd_val |= GLTSYN_CMD_ADJ_TIME;
253 break;
254 case ICE_PTP_ADJ_TIME_AT_TIME:
255 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
256 break;
257 case ICE_PTP_READ_TIME:
258 cmd_val |= GLTSYN_CMD_READ_TIME;
259 break;
260 case ICE_PTP_NOP:
261 break;
262 }
263
264 wr32(hw, GLTSYN_CMD, cmd_val);
265}
266
267/**
268 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
269 * @hw: pointer to HW struct
270 *
271 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
272 * write immediately. This triggers the hardware to begin executing all of the
273 * source and PHY timer commands synchronously.
274 */
275static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
276{
277 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
278 ice_flush(hw);
279}
280
281/* E822 family functions
282 *
283 * The following functions operate on the E822 family of devices.
284 */
285
286/**
287 * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
288 * @msg: the PHY message buffer to fill in
289 * @port: the port to access
290 * @offset: the register offset
291 */
292static void
293ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
294{
295 int phy_port, phy, quadtype;
296
297 phy_port = port % ICE_PORTS_PER_PHY_E82X;
298 phy = port / ICE_PORTS_PER_PHY_E82X;
299 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
300
301 if (quadtype == 0) {
302 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
303 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
304 } else {
305 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
306 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
307 }
308
309 if (phy == 0)
310 msg->dest_dev = rmn_0;
311 else if (phy == 1)
312 msg->dest_dev = rmn_1;
313 else
314 msg->dest_dev = rmn_2;
315}
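
/* Illustrative worked example (not part of the driver) of the port math
 * above, assuming the E822-family constants ICE_PORTS_PER_PHY_E82X == 8,
 * ICE_PORTS_PER_QUAD == 4 and ICE_QUADS_PER_PHY_E82X == 2: port 5 gives
 * phy_port = 5, phy = 0 and quadtype = 1, so the message is addressed with
 * the P_Q1_*() macros relative to P_4_BASE and sent to rmn_0, while port 13
 * gives phy_port = 5, phy = 1 and quadtype = 1 and is sent to rmn_1.
 */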
316
317/**
318 * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
319 * @low_addr: the low address to check
320 * @high_addr: on return, contains the high address of the 64bit register
321 *
322 * Checks if the provided low address is one of the known 64bit PHY values
323 * represented as two 32bit registers. If it is, return the appropriate high
324 * register offset to use.
325 */
326static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
327{
328 switch (low_addr) {
329 case P_REG_PAR_PCS_TX_OFFSET_L:
330 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
331 return true;
332 case P_REG_PAR_PCS_RX_OFFSET_L:
333 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
334 return true;
335 case P_REG_PAR_TX_TIME_L:
336 *high_addr = P_REG_PAR_TX_TIME_U;
337 return true;
338 case P_REG_PAR_RX_TIME_L:
339 *high_addr = P_REG_PAR_RX_TIME_U;
340 return true;
341 case P_REG_TOTAL_TX_OFFSET_L:
342 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
343 return true;
344 case P_REG_TOTAL_RX_OFFSET_L:
345 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
346 return true;
347 case P_REG_UIX66_10G_40G_L:
348 *high_addr = P_REG_UIX66_10G_40G_U;
349 return true;
350 case P_REG_UIX66_25G_100G_L:
351 *high_addr = P_REG_UIX66_25G_100G_U;
352 return true;
353 case P_REG_TX_CAPTURE_L:
354 *high_addr = P_REG_TX_CAPTURE_U;
355 return true;
356 case P_REG_RX_CAPTURE_L:
357 *high_addr = P_REG_RX_CAPTURE_U;
358 return true;
359 case P_REG_TX_TIMER_INC_PRE_L:
360 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
361 return true;
362 case P_REG_RX_TIMER_INC_PRE_L:
363 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
364 return true;
365 default:
366 return false;
367 }
368}
369
370/**
371 * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
372 * @low_addr: the low address to check
373 * @high_addr: on return, contains the high address of the 40bit value
374 *
375 * Checks if the provided low address is one of the known 40bit PHY values
376 * split into two registers with the lower 8 bits in the low register and the
377 * upper 32 bits in the high register. If it is, return the appropriate high
378 * register offset to use.
379 */
380static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
381{
382 switch (low_addr) {
383 case P_REG_TIMETUS_L:
384 *high_addr = P_REG_TIMETUS_U;
385 return true;
386 case P_REG_PAR_RX_TUS_L:
387 *high_addr = P_REG_PAR_RX_TUS_U;
388 return true;
389 case P_REG_PAR_TX_TUS_L:
390 *high_addr = P_REG_PAR_TX_TUS_U;
391 return true;
392 case P_REG_PCS_RX_TUS_L:
393 *high_addr = P_REG_PCS_RX_TUS_U;
394 return true;
395 case P_REG_PCS_TX_TUS_L:
396 *high_addr = P_REG_PCS_TX_TUS_U;
397 return true;
398 case P_REG_DESK_PAR_RX_TUS_L:
399 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
400 return true;
401 case P_REG_DESK_PAR_TX_TUS_L:
402 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
403 return true;
404 case P_REG_DESK_PCS_RX_TUS_L:
405 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
406 return true;
407 case P_REG_DESK_PCS_TX_TUS_L:
408 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
409 return true;
410 default:
411 return false;
412 }
413}
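
/* Illustrative sketch (not part of the driver): the split that the layout
 * above implies for a 40-bit value 'val', with the lower 8 bits going to the
 * low register and the upper 32 bits to the high register. This matches the
 * masking and shifting done by ice_write_40b_phy_reg_e82x() below.
 *
 *	u32 low = (u32)(val & 0xFF);
 *	u32 high = (u32)(val >> 8);
 */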
414
415/**
416 * ice_read_phy_reg_e82x - Read a PHY register
417 * @hw: pointer to the HW struct
418 * @port: PHY port to read from
419 * @offset: PHY register offset to read
420 * @val: on return, the contents read from the PHY
421 *
422 * Read a PHY register for the given port over the device sideband queue.
423 */
424static int
425ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
426{
427 struct ice_sbq_msg_input msg = {0};
428 int err;
429
430 ice_fill_phy_msg_e82x(&msg, port, offset);
431 msg.opcode = ice_sbq_msg_rd;
432
433 err = ice_sbq_rw_reg(hw, &msg);
434 if (err) {
435 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
436 err);
437 return err;
438 }
439
440 *val = msg.data;
441
442 return 0;
443}
444
445/**
446 * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
447 * @hw: pointer to the HW struct
448 * @port: PHY port to read from
449 * @low_addr: offset of the lower register to read from
450 * @val: on return, the contents of the 64bit value from the PHY registers
451 *
452 * Reads the two registers associated with a 64bit value and returns it in the
453 * val pointer. The offset always specifies the lower register offset to use.
454 * The high offset is looked up. This function only operates on registers
455 * known to be two parts of a 64bit value.
456 */
457static int
458ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
459{
460 u32 low, high;
461 u16 high_addr;
462 int err;
463
464 /* Only operate on registers known to be split into two 32bit
465 * registers.
466 */
467 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
468 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
469 low_addr);
470 return -EINVAL;
471 }
472
473 err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
474 if (err) {
 475 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
476 low_addr, err);
477 return err;
478 }
479
480 err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
481 if (err) {
 482 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
483 high_addr, err);
484 return err;
485 }
486
487 *val = (u64)high << 32 | low;
488
489 return 0;
490}
491
492/**
493 * ice_write_phy_reg_e82x - Write a PHY register
494 * @hw: pointer to the HW struct
495 * @port: PHY port to write to
496 * @offset: PHY register offset to write
497 * @val: The value to write to the register
498 *
499 * Write a PHY register for the given port over the device sideband queue.
500 */
501static int
502ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
503{
504 struct ice_sbq_msg_input msg = {0};
505 int err;
506
507 ice_fill_phy_msg_e82x(&msg, port, offset);
508 msg.opcode = ice_sbq_msg_wr;
509 msg.data = val;
510
511 err = ice_sbq_rw_reg(hw, &msg);
512 if (err) {
513 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
514 err);
515 return err;
516 }
517
518 return 0;
519}
520
521/**
522 * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
523 * @hw: pointer to the HW struct
524 * @port: port to write to
525 * @low_addr: offset of the low register
526 * @val: 40b value to write
527 *
528 * Write the provided 40b value to the two associated registers by splitting
529 * it up into two chunks, the lower 8 bits and the upper 32 bits.
530 */
531static int
532ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
533{
534 u32 low, high;
535 u16 high_addr;
536 int err;
537
538 /* Only operate on registers known to be split into a lower 8 bit
539 * register and an upper 32 bit register.
540 */
541 if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
542 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
543 low_addr);
544 return -EINVAL;
545 }
546
547 low = (u32)(val & P_REG_40B_LOW_M);
548 high = (u32)(val >> P_REG_40B_HIGH_S);
549
550 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
551 if (err) {
 552 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
553 low_addr, err);
554 return err;
555 }
556
557 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
558 if (err) {
 559 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
560 high_addr, err);
561 return err;
562 }
563
564 return 0;
565}
566
567/**
568 * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
569 * @hw: pointer to the HW struct
570 * @port: PHY port to read from
571 * @low_addr: offset of the lower register to read from
572 * @val: the contents of the 64bit value to write to PHY
573 *
574 * Write the 64bit value to the two associated 32bit PHY registers. The offset
575 * is always specified as the lower register, and the high address is looked
576 * up. This function only operates on registers known to be two parts of
577 * a 64bit value.
578 */
579static int
580ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
581{
582 u32 low, high;
583 u16 high_addr;
584 int err;
585
586 /* Only operate on registers known to be split into two 32bit
587 * registers.
588 */
589 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
590 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
591 low_addr);
592 return -EINVAL;
593 }
594
595 low = lower_32_bits(val);
596 high = upper_32_bits(val);
597
598 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
599 if (err) {
 600 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
601 low_addr, err);
602 return err;
603 }
604
605 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
606 if (err) {
 607 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
608 high_addr, err);
609 return err;
610 }
611
612 return 0;
613}
614
615/**
616 * ice_fill_quad_msg_e82x - Fill message data for quad register access
617 * @msg: the PHY message buffer to fill in
618 * @quad: the quad to access
619 * @offset: the register offset
620 *
621 * Fill a message buffer for accessing a register in a quad shared between
622 * multiple PHYs.
623 */
624static int
625ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
626{
627 u32 addr;
628
629 if (quad >= ICE_MAX_QUAD)
630 return -EINVAL;
631
632 msg->dest_dev = rmn_0;
633
634 if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
635 addr = Q_0_BASE + offset;
636 else
637 addr = Q_1_BASE + offset;
638
639 msg->msg_addr_low = lower_16_bits(addr);
640 msg->msg_addr_high = upper_16_bits(addr);
641
642 return 0;
643}
644
645/**
646 * ice_read_quad_reg_e82x - Read a PHY quad register
647 * @hw: pointer to the HW struct
648 * @quad: quad to read from
649 * @offset: quad register offset to read
650 * @val: on return, the contents read from the quad
651 *
652 * Read a quad register over the device sideband queue. Quad registers are
653 * shared between multiple PHYs.
654 */
655int
656ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
657{
658 struct ice_sbq_msg_input msg = {0};
659 int err;
660
661 err = ice_fill_quad_msg_e82x(&msg, quad, offset);
662 if (err)
663 return err;
664
665 msg.opcode = ice_sbq_msg_rd;
666
667 err = ice_sbq_rw_reg(hw, &msg);
668 if (err) {
669 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
670 err);
671 return err;
672 }
673
674 *val = msg.data;
675
676 return 0;
677}
678
679/**
680 * ice_write_quad_reg_e82x - Write a PHY quad register
681 * @hw: pointer to the HW struct
682 * @quad: quad to write to
683 * @offset: quad register offset to write
684 * @val: The value to write to the register
685 *
686 * Write a quad register over the device sideband queue. Quad registers are
687 * shared between multiple PHYs.
688 */
689int
690ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
691{
692 struct ice_sbq_msg_input msg = {0};
693 int err;
694
695 err = ice_fill_quad_msg_e82x(&msg, quad, offset);
696 if (err)
697 return err;
698
699 msg.opcode = ice_sbq_msg_wr;
700 msg.data = val;
701
702 err = ice_sbq_rw_reg(hw, &msg);
703 if (err) {
704 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
705 err);
706 return err;
707 }
708
709 return 0;
710}
711
712/**
713 * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
714 * @hw: pointer to the HW struct
715 * @quad: the quad to read from
716 * @idx: the timestamp index to read
717 * @tstamp: on return, the 40bit timestamp value
718 *
719 * Read a 40bit timestamp value out of the two associated registers in the
720 * quad memory block that is shared between the internal PHYs of the E822
721 * family of devices.
722 */
723static int
724ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
725{
726 u16 lo_addr, hi_addr;
727 u32 lo, hi;
728 int err;
729
730 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
731 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
732
733 err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
734 if (err) {
735 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
736 err);
737 return err;
738 }
739
740 err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
741 if (err) {
742 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
743 err);
744 return err;
745 }
746
747 /* For E822 based internal PHYs, the timestamp is reported with the
748 * lower 8 bits in the low register, and the upper 32 bits in the high
749 * register.
750 */
751 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
752
753 return 0;
754}
755
756/**
757 * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
758 * @hw: pointer to the HW struct
759 * @quad: the quad to read from
760 * @idx: the timestamp index to reset
761 *
762 * Read the timestamp out of the quad to clear its timestamp status bit from
763 * the PHY quad block that is shared between the internal PHYs of the E822
764 * devices.
765 *
766 * Note that unlike E810, software cannot directly write to the quad memory
767 * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
768 * to determine which timestamps are valid. Reading a timestamp auto-clears
769 * the valid bit.
770 *
771 * To directly clear the contents of the timestamp block entirely, discarding
772 * all timestamp data at once, software should instead use
773 * ice_ptp_reset_ts_memory_quad_e82x().
774 *
775 * This function should only be called on an idx whose bit is set according to
776 * ice_get_phy_tx_tstamp_ready().
777 */
778static int
779ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
780{
781 u64 unused_tstamp;
782 int err;
783
784 err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
785 if (err) {
786 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
787 quad, idx, err);
788 return err;
789 }
790
791 return 0;
792}
793
794/**
795 * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
796 * @hw: pointer to the HW struct
797 * @quad: the quad to read from
798 *
799 * Clear all timestamps from the PHY quad block that is shared between the
800 * internal PHYs on the E822 devices.
801 */
802void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
803{
804 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
805 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
806}
807
808/**
809 * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
810 * @hw: pointer to the HW struct
811 */
812static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
813{
814 unsigned int quad;
815
816 for (quad = 0; quad < ICE_MAX_QUAD; quad++)
817 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
818}
819
820/**
821 * ice_read_cgu_reg_e82x - Read a CGU register
822 * @hw: pointer to the HW struct
823 * @addr: Register address to read
824 * @val: storage for register value read
825 *
826 * Read the contents of a register of the Clock Generation Unit. Only
827 * applicable to E822 devices.
828 */
829static int
830ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
831{
832 struct ice_sbq_msg_input cgu_msg;
833 int err;
834
835 cgu_msg.opcode = ice_sbq_msg_rd;
836 cgu_msg.dest_dev = cgu;
837 cgu_msg.msg_addr_low = addr;
838 cgu_msg.msg_addr_high = 0x0;
839
840 err = ice_sbq_rw_reg(hw, &cgu_msg);
841 if (err) {
842 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
843 addr, err);
844 return err;
845 }
846
847 *val = cgu_msg.data;
848
849 return err;
850}
851
852/**
853 * ice_write_cgu_reg_e82x - Write a CGU register
854 * @hw: pointer to the HW struct
855 * @addr: Register address to write
856 * @val: value to write into the register
857 *
858 * Write the specified value to a register of the Clock Generation Unit. Only
859 * applicable to E822 devices.
860 */
861static int
862ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
863{
864 struct ice_sbq_msg_input cgu_msg;
865 int err;
866
867 cgu_msg.opcode = ice_sbq_msg_wr;
868 cgu_msg.dest_dev = cgu;
869 cgu_msg.msg_addr_low = addr;
870 cgu_msg.msg_addr_high = 0x0;
871 cgu_msg.data = val;
872
873 err = ice_sbq_rw_reg(hw, &cgu_msg);
874 if (err) {
875 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
876 addr, err);
877 return err;
878 }
879
880 return err;
881}
882
883/**
884 * ice_clk_freq_str - Convert time_ref_freq to string
885 * @clk_freq: Clock frequency
886 *
887 * Convert the specified TIME_REF clock frequency to a string.
888 */
889static const char *ice_clk_freq_str(u8 clk_freq)
890{
891 switch ((enum ice_time_ref_freq)clk_freq) {
892 case ICE_TIME_REF_FREQ_25_000:
893 return "25 MHz";
894 case ICE_TIME_REF_FREQ_122_880:
895 return "122.88 MHz";
896 case ICE_TIME_REF_FREQ_125_000:
897 return "125 MHz";
898 case ICE_TIME_REF_FREQ_153_600:
899 return "153.6 MHz";
900 case ICE_TIME_REF_FREQ_156_250:
901 return "156.25 MHz";
902 case ICE_TIME_REF_FREQ_245_760:
903 return "245.76 MHz";
904 default:
905 return "Unknown";
906 }
907}
908
909/**
910 * ice_clk_src_str - Convert time_ref_src to string
911 * @clk_src: Clock source
912 *
913 * Convert the specified clock source to its string name.
914 */
915static const char *ice_clk_src_str(u8 clk_src)
916{
917 switch ((enum ice_clk_src)clk_src) {
918 case ICE_CLK_SRC_TCX0:
919 return "TCX0";
920 case ICE_CLK_SRC_TIME_REF:
921 return "TIME_REF";
922 default:
923 return "Unknown";
924 }
925}
926
927/**
928 * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
929 * @hw: pointer to the HW struct
930 * @clk_freq: Clock frequency to program
931 * @clk_src: Clock source to select (TIME_REF, or TCX0)
932 *
933 * Configure the Clock Generation Unit with the desired clock frequency and
934 * time reference, enabling the PLL which drives the PTP hardware clock.
935 */
936static int
937ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
938 enum ice_clk_src clk_src)
939{
940 union tspll_ro_bwm_lf bwm_lf;
941 union nac_cgu_dword19 dw19;
942 union nac_cgu_dword22 dw22;
943 union nac_cgu_dword24 dw24;
944 union nac_cgu_dword9 dw9;
945 int err;
946
947 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
948 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
949 clk_freq);
950 return -EINVAL;
951 }
952
953 if (clk_src >= NUM_ICE_CLK_SRC) {
954 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
955 clk_src);
956 return -EINVAL;
957 }
958
959 if (clk_src == ICE_CLK_SRC_TCX0 &&
960 clk_freq != ICE_TIME_REF_FREQ_25_000) {
961 dev_warn(ice_hw_to_dev(hw),
962 "TCX0 only supports 25 MHz frequency\n");
963 return -EINVAL;
964 }
965
966 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
967 if (err)
968 return err;
969
970 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
971 if (err)
972 return err;
973
974 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
975 if (err)
976 return err;
977
978 /* Log the current clock configuration */
979 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
980 dw24.field.ts_pll_enable ? "enabled" : "disabled",
981 ice_clk_src_str(dw24.field.time_ref_sel),
982 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
983 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
984
985 /* Disable the PLL before changing the clock source or frequency */
986 if (dw24.field.ts_pll_enable) {
987 dw24.field.ts_pll_enable = 0;
988
989 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
990 if (err)
991 return err;
992 }
993
994 /* Set the frequency */
995 dw9.field.time_ref_freq_sel = clk_freq;
996 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
997 if (err)
998 return err;
999
1000 /* Configure the TS PLL feedback divisor */
1001 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
1002 if (err)
1003 return err;
1004
1005 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
1006 dw19.field.tspll_ndivratio = 1;
1007
1008 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
1009 if (err)
1010 return err;
1011
1012 /* Configure the TS PLL post divisor */
1013 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
1014 if (err)
1015 return err;
1016
1017 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
1018 dw22.field.time1588clk_sel_div2 = 0;
1019
1020 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
1021 if (err)
1022 return err;
1023
1024 /* Configure the TS PLL pre divisor and clock source */
1025 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
1026 if (err)
1027 return err;
1028
1029 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
1030 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
1031 dw24.field.time_ref_sel = clk_src;
1032
1033 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1034 if (err)
1035 return err;
1036
1037 /* Finally, enable the PLL */
1038 dw24.field.ts_pll_enable = 1;
1039
1040 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1041 if (err)
1042 return err;
1043
1044	/* Wait briefly, then verify that the PLL has locked */
1045 usleep_range(1000, 5000);
1046
1047 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
1048 if (err)
1049 return err;
1050
1051 if (!bwm_lf.field.plllock_true_lock_cri) {
1052 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
1053 return -EBUSY;
1054 }
1055
1056 /* Log the current clock configuration */
1057 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
1058 dw24.field.ts_pll_enable ? "enabled" : "disabled",
1059 ice_clk_src_str(dw24.field.time_ref_sel),
1060 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
1061 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
1062
1063 return 0;
1064}
1065
1066/**
1067 * ice_init_cgu_e82x - Initialize CGU with settings from firmware
1068 * @hw: pointer to the HW structure
1069 *
1070 * Initialize the Clock Generation Unit of the E822 device.
1071 */
1072static int ice_init_cgu_e82x(struct ice_hw *hw)
1073{
1074 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
1075 union tspll_cntr_bist_settings cntr_bist;
1076 int err;
1077
1078 err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1079 &cntr_bist.val);
1080 if (err)
1081 return err;
1082
1083	/* Disable sticky lock detection so the reported lock error is accurate */
1084 cntr_bist.field.i_plllock_sel_0 = 0;
1085 cntr_bist.field.i_plllock_sel_1 = 0;
1086
1087 err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1088 cntr_bist.val);
1089 if (err)
1090 return err;
1091
1092 /* Configure the CGU PLL using the parameters from the function
1093 * capabilities.
1094 */
1095 err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
1096 (enum ice_clk_src)ts_info->clk_src);
1097 if (err)
1098 return err;
1099
1100 return 0;
1101}
1102
1103/**
1104 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
1105 * @hw: pointer to the HW struct
1106 *
1107 * Set the window length used for the vernier port calibration process.
1108 */
1109static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
1110{
1111 u8 port;
1112
1113 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1114 int err;
1115
1116 err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
1117 PTP_VERNIER_WL);
1118 if (err) {
1119 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
1120 port, err);
1121 return err;
1122 }
1123 }
1124
1125 return 0;
1126}
1127
1128/**
1129 * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
1130 * @hw: pointer to HW struct
1131 *
1132 * Perform PHC initialization steps specific to E822 devices.
1133 */
1134static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
1135{
1136 int err;
1137 u32 regval;
1138
1139 /* Enable reading switch and PHY registers over the sideband queue */
1140#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
1141#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
1142 regval = rd32(hw, PF_SB_REM_DEV_CTL);
1143 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
1144 PF_SB_REM_DEV_CTL_PHY0);
1145 wr32(hw, PF_SB_REM_DEV_CTL, regval);
1146
1147 /* Initialize the Clock Generation Unit */
1148 err = ice_init_cgu_e82x(hw);
1149 if (err)
1150 return err;
1151
1152 /* Set window length for all the ports */
1153 return ice_ptp_set_vernier_wl(hw);
1154}
1155
1156/**
1157 * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
1158 * @hw: pointer to the HW struct
1159 * @time: Time to initialize the PHY port clocks to
1160 *
1161 * Program the PHY port registers with a new initial time value. The port
1162 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
1163 * command. The time value is the upper 32 bits of the PHY timer, usually in
1164 * units of nominal nanoseconds.
1165 */
1166static int
1167ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
1168{
1169 u64 phy_time;
1170 u8 port;
1171 int err;
1172
1173 /* The time represents the upper 32 bits of the PHY timer, so we need
1174 * to shift to account for this when programming.
1175 */
1176 phy_time = (u64)time << 32;
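	/* Illustrative example: a @time of 0x00000001 results in writing
	 * 0x0000000100000000, i.e. one nominal nanosecond placed in the upper
	 * 32 bits of the 64-bit port timer value.
	 */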
1177
1178 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1179 /* Tx case */
1180 err = ice_write_64b_phy_reg_e82x(hw, port,
1181 P_REG_TX_TIMER_INC_PRE_L,
1182 phy_time);
1183 if (err)
1184 goto exit_err;
1185
1186 /* Rx case */
1187 err = ice_write_64b_phy_reg_e82x(hw, port,
1188 P_REG_RX_TIMER_INC_PRE_L,
1189 phy_time);
1190 if (err)
1191 goto exit_err;
1192 }
1193
1194 return 0;
1195
1196exit_err:
1197 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1198 port, err);
1199
1200 return err;
1201}
1202
1203/**
1204 * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
1205 * @hw: pointer to HW struct
1206 * @port: Port number to be programmed
1207 * @time: time in cycles to adjust the port Tx and Rx clocks
1208 *
1209 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1210 * registers. The atomic adjustment won't be completed until the driver issues
1211 * an ICE_PTP_ADJ_TIME command.
1212 *
1213 * Note that time is not in units of nanoseconds. It is in clock time
1214 * including the lower sub-nanosecond portion of the port timer.
1215 *
1216 * Negative adjustments are supported using 2s complement arithmetic.
1217 */
1218static int
1219ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
1220{
1221 u32 l_time, u_time;
1222 int err;
1223
1224 l_time = lower_32_bits(time);
1225 u_time = upper_32_bits(time);
1226
1227 /* Tx case */
1228 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1229 l_time);
1230 if (err)
1231 goto exit_err;
1232
1233 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1234 u_time);
1235 if (err)
1236 goto exit_err;
1237
1238 /* Rx case */
1239 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1240 l_time);
1241 if (err)
1242 goto exit_err;
1243
1244 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1245 u_time);
1246 if (err)
1247 goto exit_err;
1248
1249 return 0;
1250
1251exit_err:
1252 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1253 port, err);
1254 return err;
1255}
1256
1257/**
1258 * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
1259 * @hw: pointer to HW struct
1260 * @adj: adjustment in nanoseconds
1261 *
1262 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1263 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1264 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
1265 */
1266static int
1267ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
1268{
1269 s64 cycles;
1270 u8 port;
1271
1272 /* The port clock supports adjustment of the sub-nanosecond portion of
1273 * the clock. We shift the provided adjustment in nanoseconds to
1274 * calculate the appropriate adjustment to program into the PHY ports.
1275 */
1276 if (adj > 0)
1277 cycles = (s64)adj << 32;
1278 else
1279 cycles = -(((s64)-adj) << 32);
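	/* Illustrative example: adj = -10 (nanoseconds) yields
	 * cycles = -(10 << 32), the 2's complement value that shifts the port
	 * timers back by 10 nominal nanoseconds once ICE_PTP_ADJ_TIME runs.
	 */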
1280
1281 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1282 int err;
1283
1284 err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
1285 if (err)
1286 return err;
1287 }
1288
1289 return 0;
1290}
1291
1292/**
1293 * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
1294 * @hw: pointer to HW struct
1295 * @incval: new increment value to prepare
1296 *
1297 * Prepare each of the PHY ports for a new increment value by programming the
1298 * port's TIMETUS registers. The new increment value will be updated after
1299 * issuing an ICE_PTP_INIT_INCVAL command.
1300 */
1301static int
1302ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
1303{
1304 int err;
1305 u8 port;
1306
1307 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1308 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
1309 incval);
1310 if (err)
1311 goto exit_err;
1312 }
1313
1314 return 0;
1315
1316exit_err:
1317 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1318 port, err);
1319
1320 return err;
1321}
1322
1323/**
1324 * ice_ptp_read_port_capture - Read a port's local time capture
1325 * @hw: pointer to HW struct
1326 * @port: Port number to read
1327 * @tx_ts: on return, the Tx port time capture
1328 * @rx_ts: on return, the Rx port time capture
1329 *
1330 * Read the port's Tx and Rx local time capture values.
1331 *
1332 * Note this has no equivalent for the E810 devices.
1333 */
1334static int
1335ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1336{
1337 int err;
1338
1339 /* Tx case */
1340 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1341 if (err) {
1342 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1343 err);
1344 return err;
1345 }
1346
1347 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1348 (unsigned long long)*tx_ts);
1349
1350 /* Rx case */
1351 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1352 if (err) {
1353 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1354 err);
1355 return err;
1356 }
1357
1358 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1359 (unsigned long long)*rx_ts);
1360
1361 return 0;
1362}
1363
1364/**
1365 * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
1366 * @hw: pointer to HW struct
1367 * @port: Port to send the timer command to
1368 * @cmd: Command to be sent to the port
1369 *
1370 * Prepare the requested port for an upcoming timer sync command.
1371 *
1372 * Do not use this function directly. If you want to configure exactly one
1373 * port, use ice_ptp_one_port_cmd() instead.
1374 */
1375static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
1376 enum ice_ptp_tmr_cmd cmd)
1377{
1378 u32 cmd_val, val;
1379 u8 tmr_idx;
1380 int err;
1381
1382 tmr_idx = ice_get_ptp_src_clock_index(hw);
1383 cmd_val = tmr_idx << SEL_PHY_SRC;
1384 switch (cmd) {
1385 case ICE_PTP_INIT_TIME:
1386 cmd_val |= PHY_CMD_INIT_TIME;
1387 break;
1388 case ICE_PTP_INIT_INCVAL:
1389 cmd_val |= PHY_CMD_INIT_INCVAL;
1390 break;
1391 case ICE_PTP_ADJ_TIME:
1392 cmd_val |= PHY_CMD_ADJ_TIME;
1393 break;
1394 case ICE_PTP_READ_TIME:
1395 cmd_val |= PHY_CMD_READ_TIME;
1396 break;
1397 case ICE_PTP_ADJ_TIME_AT_TIME:
1398 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1399 break;
1400 case ICE_PTP_NOP:
1401 break;
1402 }
1403
1404 /* Tx case */
1405 /* Read, modify, write */
1406 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
1407 if (err) {
1408 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1409 err);
1410 return err;
1411 }
1412
1413 /* Modify necessary bits only and perform write */
1414 val &= ~TS_CMD_MASK;
1415 val |= cmd_val;
1416
1417 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
1418 if (err) {
1419 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1420 err);
1421 return err;
1422 }
1423
1424 /* Rx case */
1425 /* Read, modify, write */
1426 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
1427 if (err) {
1428 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1429 err);
1430 return err;
1431 }
1432
1433 /* Modify necessary bits only and perform write */
1434 val &= ~TS_CMD_MASK;
1435 val |= cmd_val;
1436
1437 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
1438 if (err) {
1439 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1440 err);
1441 return err;
1442 }
1443
1444 return 0;
1445}
1446
1447/**
1448 * ice_ptp_one_port_cmd - Prepare one port for a timer command
1449 * @hw: pointer to the HW struct
1450 * @configured_port: the port to configure with configured_cmd
1451 * @configured_cmd: timer command to prepare on the configured_port
1452 *
1453 * Prepare the configured_port for the configured_cmd, and prepare all other
1454 * ports for ICE_PTP_NOP. This causes the configured_port to execute the
1455 * desired command while all other ports perform no operation.
1456 */
1457static int
1458ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
1459 enum ice_ptp_tmr_cmd configured_cmd)
1460{
1461 u8 port;
1462
1463 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1464 enum ice_ptp_tmr_cmd cmd;
1465 int err;
1466
1467 if (port == configured_port)
1468 cmd = configured_cmd;
1469 else
1470 cmd = ICE_PTP_NOP;
1471
1472 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1473 if (err)
1474 return err;
1475 }
1476
1477 return 0;
1478}
1479
1480/**
1481 * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
1482 * @hw: pointer to the HW struct
1483 * @cmd: timer command to prepare
1484 *
1485 * Prepare all ports connected to this device for an upcoming timer sync
1486 * command.
1487 */
1488static int
1489ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1490{
1491 u8 port;
1492
1493 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1494 int err;
1495
1496 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1497 if (err)
1498 return err;
1499 }
1500
1501 return 0;
1502}
1503
1504/* E822 Vernier calibration functions
1505 *
1506 * The following functions are used as part of the vernier calibration of
1507 * a port. This calibration increases the precision of the timestamps on the
1508 * port.
1509 */
1510
1511/**
1512 * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
1513 * @hw: pointer to HW struct
1514 * @port: the port to read from
1515 * @link_out: if non-NULL, holds link speed on success
1516 * @fec_out: if non-NULL, holds FEC algorithm on success
1517 *
1518 * Read the serdes data for the PHY port and extract the link speed and FEC
1519 * algorithm.
1520 */
1521static int
1522ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
1523 enum ice_ptp_link_spd *link_out,
1524 enum ice_ptp_fec_mode *fec_out)
1525{
1526 enum ice_ptp_link_spd link;
1527 enum ice_ptp_fec_mode fec;
1528 u32 serdes;
1529 int err;
1530
1531 err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
1532 if (err) {
1533 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1534 return err;
1535 }
1536
1537 /* Determine the FEC algorithm */
1538 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1539
1540 serdes &= P_REG_LINK_SPEED_SERDES_M;
1541
1542 /* Determine the link speed */
1543 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1544 switch (serdes) {
1545 case ICE_PTP_SERDES_25G:
1546 link = ICE_PTP_LNK_SPD_25G_RS;
1547 break;
1548 case ICE_PTP_SERDES_50G:
1549 link = ICE_PTP_LNK_SPD_50G_RS;
1550 break;
1551 case ICE_PTP_SERDES_100G:
1552 link = ICE_PTP_LNK_SPD_100G_RS;
1553 break;
1554 default:
1555 return -EIO;
1556 }
1557 } else {
1558 switch (serdes) {
1559 case ICE_PTP_SERDES_1G:
1560 link = ICE_PTP_LNK_SPD_1G;
1561 break;
1562 case ICE_PTP_SERDES_10G:
1563 link = ICE_PTP_LNK_SPD_10G;
1564 break;
1565 case ICE_PTP_SERDES_25G:
1566 link = ICE_PTP_LNK_SPD_25G;
1567 break;
1568 case ICE_PTP_SERDES_40G:
1569 link = ICE_PTP_LNK_SPD_40G;
1570 break;
1571 case ICE_PTP_SERDES_50G:
1572 link = ICE_PTP_LNK_SPD_50G;
1573 break;
1574 default:
1575 return -EIO;
1576 }
1577 }
1578
1579 if (link_out)
1580 *link_out = link;
1581 if (fec_out)
1582 *fec_out = fec;
1583
1584 return 0;
1585}
1586
1587/**
1588 * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
1589 * @hw: pointer to HW struct
1590 * @port: the port whose quad should be configured
1591 */
1592static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
1593{
1594 enum ice_ptp_link_spd link_spd;
1595 int err;
1596 u32 val;
1597 u8 quad;
1598
1599 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
1600 if (err) {
1601 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1602 err);
1603 return;
1604 }
1605
1606 quad = port / ICE_PORTS_PER_QUAD;
1607
1608 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1609 if (err) {
1610 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
1611 err);
1612 return;
1613 }
1614
1615 if (link_spd >= ICE_PTP_LNK_SPD_40G)
1616 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1617 else
1618 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1619
1620 err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1621 if (err) {
1622 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1623 err);
1624 return;
1625 }
1626}
1627
1628/**
1629 * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
1630 * @hw: pointer to the HW structure
1631 * @port: the port to configure
1632 *
1633 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1634 * hardware clock time units (TUs). That is, determine the number of TUs per
1635 * serdes unit interval, and program the UIX registers with this conversion.
1636 *
1637 * This conversion is used as part of the calibration process when determining
1638 * the additional error of a timestamp vs the real time of transmission or
1639 * receipt of the packet.
1640 *
1641 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1642 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1643 *
1644 * To calculate the conversion ratio, we use the following facts:
1645 *
1646 * a) the clock frequency in Hz (cycles per second)
1647 * b) the number of TUs per cycle (the increment value of the clock)
1648 * c) 1 second per 1 billion nanoseconds
1649 * d) the duration of 66 UIs in nanoseconds
1650 *
1651 * Given these facts, we can use the following table to work out what ratios
1652 * to multiply in order to get the number of TUs per 66 UIs:
1653 *
1654 * cycles | 1 second | incval (TUs) | nanoseconds
1655 * -------+--------------+--------------+-------------
1656 * second | 1 billion ns | cycle | 66 UIs
1657 *
1658 * To perform the multiplication using integers without too much loss of
1659 * precision, we can use the following equation:
1660 *
1661 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1662 *
1663 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1664 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1665 *
1666 * The increment value has a maximum expected range of about 34 bits, while
1667 * the frequency value is about 29 bits. Multiplying these values shouldn't
1668 * overflow the 64 bits. However, we must then further multiply them again by
1669 * the Serdes unit interval duration. To avoid overflow here, we split the
1670 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1671 * a divide by 390,625,000. This does lose some precision, but avoids
1672 * miscalculation due to arithmetic overflow.
1673 */
1674static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
1675{
1676 u64 cur_freq, clk_incval, tu_per_sec, uix;
1677 int err;
1678
1679 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1680 clk_incval = ice_ptp_read_src_incval(hw);
1681
1682 /* Calculate TUs per second divided by 256 */
1683 tu_per_sec = (cur_freq * clk_incval) >> 8;
1684
1685#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1686#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1687
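	/* Purely illustrative arithmetic (not real hardware parameters): if
	 * freq * incval were 2^38 TUs per second, tu_per_sec above is 2^30,
	 * and the 10G/40G ratio computed below would be
	 * (2^30 * 640) / 390625000 ~= 1759 TUs per 66 UIs.
	 */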
1688 /* Program the 10Gb/40Gb conversion ratio */
1689 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1690
1691 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
1692 uix);
1693 if (err) {
1694 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1695 err);
1696 return err;
1697 }
1698
1699 /* Program the 25Gb/100Gb conversion ratio */
1700 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1701
1702 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
1703 uix);
1704 if (err) {
1705 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1706 err);
1707 return err;
1708 }
1709
1710 return 0;
1711}
1712
1713/**
1714 * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
1715 * @hw: pointer to the HW struct
1716 * @port: port to configure
1717 *
1718 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1719 * timestamp calibration process. This depends on the link speed, as the PHY
1720 * uses different markers depending on the speed.
1721 *
1722 * 1Gb/10Gb/25Gb:
1723 * - Tx/Rx PAR/PCS markers
1724 *
1725 * 25Gb RS:
1726 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1727 *
1728 * 40Gb/50Gb:
1729 * - Tx/Rx PAR/PCS markers
1730 * - Rx Deskew PAR/PCS markers
1731 *
1732 * 50G RS and 100G RS:
1733 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1734 * - Rx Deskew PAR/PCS markers
1735 * - Tx PAR/PCS markers
1736 *
1737 * To calculate the conversion, we use the PHC clock frequency (cycles per
1738 * second), the increment value (TUs per cycle), and the related PHY clock
1739 * frequency to calculate the TUs per unit of the PHY link clock. The
1740 * following table shows how the units convert:
1741 *
1742 * cycles | TUs | second
1743 * -------+-------+--------
1744 * second | cycle | cycles
1745 *
1746 * For each conversion register, look up the appropriate frequency from the
1747 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1748 * this to the appropriate register, preparing hardware to perform timestamp
1749 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1750 * in order to calibrate for the internal PHY delays.
1751 *
1752 * Note that the increment value ranges up to ~34 bits, and the clock
1753 * frequency is ~29 bits, so multiplying them together should fit within the
1754 * 64 bit arithmetic.
1755 */
1756static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
1757{
1758 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1759 enum ice_ptp_link_spd link_spd;
1760 enum ice_ptp_fec_mode fec_mode;
1761 int err;
1762
1763 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1764 if (err)
1765 return err;
1766
1767 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1768 clk_incval = ice_ptp_read_src_incval(hw);
1769
1770 /* Calculate TUs per cycle of the PHC clock */
1771 tu_per_sec = cur_freq * clk_incval;
1772
1773 /* For each PHY conversion register, look up the appropriate link
1774 * speed frequency and determine the TUs per that clock's cycle time.
1775 * Split this into a high and low value and then program the
1776 * appropriate register. If that link speed does not use the
1777 * associated register, write zeros to clear it instead.
1778 */
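	/* Purely illustrative (hypothetical numbers): if tu_per_sec were 8e17
	 * TUs and a link speed used a 400 MHz Tx PAR clock, P_REG_PAR_TX_TUS
	 * below would be written with 8e17 / 4e8 = 2e9 TUs per PAR clock
	 * cycle.
	 */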
1779
1780 /* P_REG_PAR_TX_TUS */
1781 if (e822_vernier[link_spd].tx_par_clk)
1782 phy_tus = div_u64(tu_per_sec,
1783 e822_vernier[link_spd].tx_par_clk);
1784 else
1785 phy_tus = 0;
1786
1787 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
1788 phy_tus);
1789 if (err)
1790 return err;
1791
1792 /* P_REG_PAR_RX_TUS */
1793 if (e822_vernier[link_spd].rx_par_clk)
1794 phy_tus = div_u64(tu_per_sec,
1795 e822_vernier[link_spd].rx_par_clk);
1796 else
1797 phy_tus = 0;
1798
1799 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
1800 phy_tus);
1801 if (err)
1802 return err;
1803
1804 /* P_REG_PCS_TX_TUS */
1805 if (e822_vernier[link_spd].tx_pcs_clk)
1806 phy_tus = div_u64(tu_per_sec,
1807 e822_vernier[link_spd].tx_pcs_clk);
1808 else
1809 phy_tus = 0;
1810
1811 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
1812 phy_tus);
1813 if (err)
1814 return err;
1815
1816 /* P_REG_PCS_RX_TUS */
1817 if (e822_vernier[link_spd].rx_pcs_clk)
1818 phy_tus = div_u64(tu_per_sec,
1819 e822_vernier[link_spd].rx_pcs_clk);
1820 else
1821 phy_tus = 0;
1822
1823 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
1824 phy_tus);
1825 if (err)
1826 return err;
1827
1828 /* P_REG_DESK_PAR_TX_TUS */
1829 if (e822_vernier[link_spd].tx_desk_rsgb_par)
1830 phy_tus = div_u64(tu_per_sec,
1831 e822_vernier[link_spd].tx_desk_rsgb_par);
1832 else
1833 phy_tus = 0;
1834
1835 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1836 phy_tus);
1837 if (err)
1838 return err;
1839
1840 /* P_REG_DESK_PAR_RX_TUS */
1841 if (e822_vernier[link_spd].rx_desk_rsgb_par)
1842 phy_tus = div_u64(tu_per_sec,
1843 e822_vernier[link_spd].rx_desk_rsgb_par);
1844 else
1845 phy_tus = 0;
1846
1847 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1848 phy_tus);
1849 if (err)
1850 return err;
1851
1852 /* P_REG_DESK_PCS_TX_TUS */
1853 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1854 phy_tus = div_u64(tu_per_sec,
1855 e822_vernier[link_spd].tx_desk_rsgb_pcs);
1856 else
1857 phy_tus = 0;
1858
1859 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1860 phy_tus);
1861 if (err)
1862 return err;
1863
1864 /* P_REG_DESK_PCS_RX_TUS */
1865 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1866 phy_tus = div_u64(tu_per_sec,
1867 e822_vernier[link_spd].rx_desk_rsgb_pcs);
1868 else
1869 phy_tus = 0;
1870
1871 return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1872 phy_tus);
1873}
1874
1875/**
1876 * ice_calc_fixed_tx_offset_e82x - Calculate the fixed Tx offset for a port
1877 * @hw: pointer to the HW struct
1878 * @link_spd: the link speed to calculate for
1879 *
1880 * Calculate the fixed offset due to known static latency data.
1881 */
1882static u64
1883ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1884{
1885 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1886
1887 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1888 clk_incval = ice_ptp_read_src_incval(hw);
1889
1890 /* Calculate TUs per second */
1891 tu_per_sec = cur_freq * clk_incval;
1892
1893 /* Calculate number of TUs to add for the fixed Tx latency. Since the
1894 * latency measurement is in 1/100th of a nanosecond, we need to
1895 * multiply by tu_per_sec and then divide by 1e11. This calculation
1896 * overflows 64 bit integer arithmetic, so break it up into two
1897 * divisions by 1e4 first then by 1e7.
1898 */
1899 fixed_offset = div_u64(tu_per_sec, 10000);
1900 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1901 fixed_offset = div_u64(fixed_offset, 10000000);
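	/* Purely illustrative: with a hypothetical tu_per_sec of 8e17 and a
	 * tx_fixed_delay of 250 (2.5 ns in 1/100 ns units), this computes
	 * (8e17 / 1e4) * 250 / 1e7 = 2e9 TUs, matching the exact
	 * 8e17 * 250 / 1e11 while staying within 64-bit arithmetic.
	 */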
1902
1903 return fixed_offset;
1904}
1905
1906/**
1907 * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
1908 * @hw: pointer to the HW struct
1909 * @port: the PHY port to configure
1910 *
1911 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1912 * adjust Tx timestamps by. This is calculated by combining some known static
1913 * latency along with the Vernier offset computations done by hardware.
1914 *
1915 * This function will not return successfully until the Tx offset calculations
1916 * have been completed, which requires waiting until at least one packet has
1917 * been transmitted by the device. It is safe to call this function
1918 * periodically until calibration succeeds, as it will only program the offset
1919 * once.
1920 *
1921 * To avoid overflow, when calculating the offset based on the known static
1922 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1923 * the TUs per second up front. This avoids overflow while allowing
1924 * calculation of the adjustment using integer arithmetic.
1925 *
1926 * Returns zero on success, -EBUSY if the hardware vernier offset
1927 * calibration has not completed, or another error code on failure.
1928 */
1929int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
1930{
1931 enum ice_ptp_link_spd link_spd;
1932 enum ice_ptp_fec_mode fec_mode;
1933 u64 total_offset, val;
1934 int err;
1935 u32 reg;
1936
1937 /* Nothing to do if we've already programmed the offset */
1938	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
1939 if (err) {
1940 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1941 port, err);
1942 return err;
1943 }
1944
1945 if (reg)
1946 return 0;
1947
1948	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
1949 if (err) {
1950 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1951 port, err);
1952 return err;
1953 }
1954
1955 if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1956 return -EBUSY;
1957
1958 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1959 if (err)
1960 return err;
1961
1962 total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
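	/* total_offset accumulates the fixed static latency first, then the
	 * PAR/PCS Vernier offset measured by hardware, and, for multi-lane
	 * RS-FEC speeds, the additional Tx time measurement read below.
	 */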
1963
1964 /* Read the first Vernier offset from the PHY register and add it to
1965 * the total offset.
1966 */
1967 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1968 link_spd == ICE_PTP_LNK_SPD_10G ||
1969 link_spd == ICE_PTP_LNK_SPD_25G ||
1970 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1971 link_spd == ICE_PTP_LNK_SPD_40G ||
1972 link_spd == ICE_PTP_LNK_SPD_50G) {
1973 err = ice_read_64b_phy_reg_e82x(hw, port,
1974 P_REG_PAR_PCS_TX_OFFSET_L,
1975 &val);
1976 if (err)
1977 return err;
1978
1979 total_offset += val;
1980 }
1981
1982 /* For Tx, we only need to use the second Vernier offset for
1983 * multi-lane link speeds with RS-FEC. The lanes will always be
1984 * aligned.
1985 */
1986 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1987 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1988 err = ice_read_64b_phy_reg_e82x(hw, port,
1989 P_REG_PAR_TX_TIME_L,
1990 &val);
1991 if (err)
1992 return err;
1993
1994 total_offset += val;
1995 }
1996
1997 /* Now that the total offset has been calculated, program it to the
1998 * PHY and indicate that the Tx offset is ready. After this,
1999 * timestamps will be enabled.
2000 */
2001 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
2002 total_offset);
2003 if (err)
2004 return err;
2005
2006 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
2007 if (err)
2008 return err;
2009
2010 dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
2011 port);
2012
2013 return 0;
2014}
2015
2016/**
2017 * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
2018 * @hw: pointer to the HW struct
2019 * @port: the PHY port to adjust for
2020 * @link_spd: the current link speed of the PHY
2021 * @fec_mode: the current FEC mode of the PHY
2022 * @pmd_adj: on return, the amount to adjust the Rx total offset by
2023 *
2024 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
2025 * This varies by link speed and FEC mode. The value calculated accounts for
2026 * various delays caused when receiving a packet.
2027 */
2028static int
2029ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
2030 enum ice_ptp_link_spd link_spd,
2031 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
2032{
2033 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
2034 u8 pmd_align;
2035 u32 val;
2036 int err;
2037
2038 err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
2039 if (err) {
2040 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
2041 err);
2042 return err;
2043 }
2044
2045 pmd_align = (u8)val;
2046
2047 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2048 clk_incval = ice_ptp_read_src_incval(hw);
2049
2050 /* Calculate TUs per second */
2051 tu_per_sec = cur_freq * clk_incval;
2052
2053 /* The PMD alignment adjustment measurement depends on the link speed,
2054 * and whether FEC is enabled. For each link speed, the alignment
2055 * adjustment is calculated by dividing a value by the length of
2056 * a Time Unit in nanoseconds.
2057 *
2058	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
2059 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
2060 * 10G w/FEC: align * 0.1 * 32/33
2061 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
2062 * 25G w/FEC: align * 0.4 * 32/33
2063 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
2064 * 40G w/FEC: align * 0.1 * 32/33
2065 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
2066 * 50G w/FEC: align * 0.8 * 32/33
2067 *
2068 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
2069 *
2070 * To allow for calculating this value using integer arithmetic, we
2071 * instead start with the number of TUs per second, (inverse of the
2072 * length of a Time Unit in nanoseconds), multiply by a value based
2073 * on the PMD alignment register, and then divide by the right value
2074 * calculated based on the table above. To avoid integer overflow this
2075 * division is broken up into a step of dividing by 125 first.
2076 */
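	/* Purely illustrative (hypothetical values): for a speed whose table
	 * entry above gives mult = pmd_align, with pmd_align = 20, a
	 * pmd_adj_divisor of 33000000 and a tu_per_sec of 8e17, the
	 * adjustment below would be ((8e17 / 125) * 20) / 33000000 ~= 3.9e9
	 * TUs.
	 */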
2077 if (link_spd == ICE_PTP_LNK_SPD_1G) {
2078 if (pmd_align == 4)
2079 mult = 10;
2080 else
2081 mult = (pmd_align + 6) % 10;
2082 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
2083 link_spd == ICE_PTP_LNK_SPD_25G ||
2084 link_spd == ICE_PTP_LNK_SPD_40G ||
2085 link_spd == ICE_PTP_LNK_SPD_50G) {
2086 /* If Clause 74 FEC, always calculate PMD adjust */
2087 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
2088 mult = pmd_align;
2089 else
2090 mult = 0;
2091 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
2092 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2093 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2094 if (pmd_align < 17)
2095 mult = pmd_align + 40;
2096 else
2097 mult = pmd_align;
2098 } else {
2099 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
2100 link_spd);
2101 mult = 0;
2102 }
2103
2104 /* In some cases, there's no need to adjust for the PMD alignment */
2105 if (!mult) {
2106 *pmd_adj = 0;
2107 return 0;
2108 }
2109
2110 /* Calculate the adjustment by multiplying TUs per second by the
2111 * appropriate multiplier and divisor. To avoid overflow, we first
2112 * divide by 125, and then handle remaining divisor based on the link
2113 * speed pmd_adj_divisor value.
2114 */
2115 adj = div_u64(tu_per_sec, 125);
2116 adj *= mult;
2117 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
2118
2119 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
2120 * cycle count is necessary.
2121 */
2122 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
2123 u64 cycle_adj;
2124 u8 rx_cycle;
2125
2126 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
2127 &val);
2128 if (err) {
2129 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
2130 err);
2131 return err;
2132 }
2133
2134 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
2135 if (rx_cycle) {
2136 mult = (4 - rx_cycle) * 40;
2137
2138 cycle_adj = div_u64(tu_per_sec, 125);
2139 cycle_adj *= mult;
2140 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2141
2142 adj += cycle_adj;
2143 }
2144 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
2145 u64 cycle_adj;
2146 u8 rx_cycle;
2147
2148 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
2149 &val);
2150 if (err) {
2151 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
2152 err);
2153 return err;
2154 }
2155
2156 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
2157 if (rx_cycle) {
2158 mult = rx_cycle * 40;
2159
2160 cycle_adj = div_u64(tu_per_sec, 125);
2161 cycle_adj *= mult;
2162 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2163
2164 adj += cycle_adj;
2165 }
2166 }
2167
2168 /* Return the calculated adjustment */
2169 *pmd_adj = adj;
2170
2171 return 0;
2172}
2173
2174/**
2175 * ice_calc_fixed_rx_offset_e82x - Calculate the fixed Rx offset for a port
2176 * @hw: pointer to HW struct
2177 * @link_spd: the link speed to calculate for
2178 *
2179 * Determine the fixed Rx latency for a given link speed.
2180 */
2181static u64
2182ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2183{
2184 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2185
2186 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2187 clk_incval = ice_ptp_read_src_incval(hw);
2188
2189 /* Calculate TUs per second */
2190 tu_per_sec = cur_freq * clk_incval;
2191
2192 /* Calculate number of TUs to add for the fixed Rx latency. Since the
2193 * latency measurement is in 1/100th of a nanosecond, we need to
2194 * multiply by tu_per_sec and then divide by 1e11. This calculation
2195 * overflows 64 bit integer arithmetic, so break it up into two
2196 * divisions by 1e4 first then by 1e7.
2197 */
2198 fixed_offset = div_u64(tu_per_sec, 10000);
2199 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2200 fixed_offset = div_u64(fixed_offset, 10000000);
2201
2202 return fixed_offset;
2203}
2204
2205/**
2206 * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
2207 * @hw: pointer to the HW struct
2208 * @port: the PHY port to configure
2209 *
2210 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2211 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2212 * measurements taken in hardware with some data about known fixed delay as
2213 * well as adjusting for multi-lane alignment delay.
2214 *
2215 * This function will not return successfully until the Rx offset calculations
2216 * have been completed, which requires waiting until at least one packet has
2217 * been received by the device. It is safe to call this function periodically
2218 * until calibration succeeds, as it will only program the offset once.
2219 *
2220 * This function must be called only after the offset registers are valid,
2221 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2222 * has measured the offset.
2223 *
2224 * To avoid overflow, when calculating the offset based on the known static
2225 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2226 * the TUs per second up front. This avoids overflow while allowing
2227 * calculation of the adjustment using integer arithmetic.
2228 *
2229 * Returns zero on success, -EBUSY if the hardware vernier offset
2230 * calibration has not completed, or another error code on failure.
2231 */
2232int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
2233{
2234 enum ice_ptp_link_spd link_spd;
2235 enum ice_ptp_fec_mode fec_mode;
2236 u64 total_offset, pmd, val;
2237 int err;
2238 u32 reg;
2239
2240 /* Nothing to do if we've already programmed the offset */
2241	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
2242 if (err) {
2243 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2244 port, err);
2245 return err;
2246 }
2247
2248 if (reg)
2249 return 0;
2250
2251	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
2252 if (err) {
2253 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2254 port, err);
2255 return err;
2256 }
2257
2258 if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2259 return -EBUSY;
2260
2261 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
2262 if (err)
2263 return err;
2264
2265 total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
2266
2267 /* Read the first Vernier offset from the PHY register and add it to
2268 * the total offset.
2269 */
2270 err = ice_read_64b_phy_reg_e82x(hw, port,
2271 P_REG_PAR_PCS_RX_OFFSET_L,
2272 &val);
2273 if (err)
2274 return err;
2275
2276 total_offset += val;
2277
2278 /* For Rx, all multi-lane link speeds include a second Vernier
2279 * calibration, because the lanes might not be aligned.
2280 */
2281 if (link_spd == ICE_PTP_LNK_SPD_40G ||
2282 link_spd == ICE_PTP_LNK_SPD_50G ||
2283 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2284 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2285 err = ice_read_64b_phy_reg_e82x(hw, port,
2286 P_REG_PAR_RX_TIME_L,
2287 &val);
2288 if (err)
2289 return err;
2290
2291 total_offset += val;
2292 }
2293
2294 /* In addition, Rx must account for the PMD alignment */
2295 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
2296 if (err)
2297 return err;
2298
2299 /* For RS-FEC, this adjustment adds delay, but for other modes, it
2300 * subtracts delay.
2301 */
2302 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2303 total_offset += pmd;
2304 else
2305 total_offset -= pmd;
2306
2307 /* Now that the total offset has been calculated, program it to the
2308 * PHY and indicate that the Rx offset is ready. After this,
2309 * timestamps will be enabled.
2310 */
2311 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2312 total_offset);
2313 if (err)
2314 return err;
2315
2316 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
2317 if (err)
2318 return err;
2319
2320 dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
2321 port);
2322
2323 return 0;
2324}
2325
2326/**
2327 * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
2328 * @hw: pointer to the HW struct
2329 * @port: the PHY port to read
2330 * @phy_time: on return, the 64bit PHY timer value
2331 * @phc_time: on return, the lower 64bits of PHC time
2332 *
2333 * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
2334 * and PHC timer values.
2335 */
2336static int
2337ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
2338 u64 *phc_time)
2339{
2340 u64 tx_time, rx_time;
2341 u32 zo, lo;
2342 u8 tmr_idx;
2343 int err;
2344
2345 tmr_idx = ice_get_ptp_src_clock_index(hw);
2346
2347	/* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
2348 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2349
2350	/* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
2351 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
2352 if (err)
2353 return err;
2354
2355 /* Issue the sync to start the ICE_PTP_READ_TIME capture */
2356 ice_ptp_exec_tmr_cmd(hw);
2357
2358 /* Read the captured PHC time from the shadow time registers */
2359 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2360 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2361 *phc_time = (u64)lo << 32 | zo;
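	/* The captured PHC value is assembled in the same layout as the PHY
	 * port timers: SHTIME_L supplies the upper 32 bits and SHTIME_0 the
	 * lower 32 bits of the value compared against the port capture.
	 */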
2362
2363 /* Read the captured PHY time from the PHY shadow registers */
2364 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2365 if (err)
2366 return err;
2367
2368 /* If the PHY Tx and Rx timers don't match, log a warning message.
2369 * Note that this should not happen in normal circumstances since the
2370 * driver always programs them together.
2371 */
2372 if (tx_time != rx_time)
2373 dev_warn(ice_hw_to_dev(hw),
2374 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2375 port, (unsigned long long)tx_time,
2376 (unsigned long long)rx_time);
2377
2378 *phy_time = tx_time;
2379
2380 return 0;
2381}
2382
2383/**
2384 * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
2385 * @hw: pointer to the HW struct
2386 * @port: the PHY port to synchronize
2387 *
2388 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2389 * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
2390 * simultaneous read of the PHY timer and PHC timer. Then we use the
2391 * difference to calculate an appropriate 2s complement addition to add
2392 * to the PHY timer in order to ensure it reads the same value as the
2393 * primary PHC timer.
2394 */
2395static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
2396{
2397 u64 phc_time, phy_time, difference;
2398 int err;
2399
2400 if (!ice_ptp_lock(hw)) {
2401 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2402 return -EBUSY;
2403 }
2404
2405 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2406 if (err)
2407 goto err_unlock;
2408
2409 /* Calculate the amount required to add to the port time in order for
2410 * it to match the PHC time.
2411 *
2412 * Note that the port adjustment is done using 2s complement
2413 * arithmetic. This is convenient since it means that we can simply
2414 * calculate the difference between the PHC time and the port time,
2415 * and it will be interpreted correctly.
2416 */
2417 difference = phc_time - phy_time;
2418
2419 err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
2420 if (err)
2421 goto err_unlock;
2422
2423 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
2424 if (err)
2425 goto err_unlock;
2426
2427 /* Do not perform any action on the main timer */
2428 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2429
2430 /* Issue the sync to activate the time adjustment */
2431 ice_ptp_exec_tmr_cmd(hw);
2432
2433 /* Re-capture the timer values to flush the command registers and
2434 * verify that the time was properly adjusted.
2435 */
2436 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2437 if (err)
2438 goto err_unlock;
2439
2440 dev_info(ice_hw_to_dev(hw),
2441 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2442 port, (unsigned long long)phy_time,
2443 (unsigned long long)phc_time);
2444
2445 ice_ptp_unlock(hw);
2446
2447 return 0;
2448
2449err_unlock:
2450 ice_ptp_unlock(hw);
2451 return err;
2452}
2453
2454/**
2455 * ice_stop_phy_timer_e82x - Stop the PHY clock timer
2456 * @hw: pointer to the HW struct
2457 * @port: the PHY port to stop
2458 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2459 *
2460 * Stop the clock of a PHY port. This must be done as part of the flow to
2461 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2462 * initialized or when link speed changes.
2463 */
2464int
2465ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
2466{
2467 int err;
2468 u32 val;
2469
2470 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
2471 if (err)
2472 return err;
2473
2474 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
2475 if (err)
2476 return err;
2477
2478 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2479 if (err)
2480 return err;
2481
2482 val &= ~P_REG_PS_START_M;
2483 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2484 if (err)
2485 return err;
2486
2487 val &= ~P_REG_PS_ENA_CLK_M;
2488 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2489 if (err)
2490 return err;
2491
2492 if (soft_reset) {
2493 val |= P_REG_PS_SFT_RESET_M;
2494 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2495 if (err)
2496 return err;
2497 }
2498
2499 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2500
2501 return 0;
2502}
2503
2504/**
2505 * ice_start_phy_timer_e82x - Start the PHY clock timer
2506 * @hw: pointer to the HW struct
2507 * @port: the PHY port to start
2508 *
2509 * Start the clock of a PHY port. This must be done as part of the flow to
2510 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2511 * initialized or when link speed changes.
2512 *
2513 * Hardware will take Vernier measurements on Tx or Rx of packets.
2514 */
2515int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
2516{
2517 u32 lo, hi, val;
2518 u64 incval;
2519 u8 tmr_idx;
2520 int err;
2521
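	/* Bring-up sequence implemented below: stop the port timer, configure
	 * the lane type and the UIX and PAR/PCS conversion ratios, load the
	 * source timer increment value, toggle the port through soft reset,
	 * start it, enable its clock and offset load, and finally synchronize
	 * the PHY timer to the PHC.
	 */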
2522 tmr_idx = ice_get_ptp_src_clock_index(hw);
2523
2524 err = ice_stop_phy_timer_e82x(hw, port, false);
2525 if (err)
2526 return err;
2527
2528 ice_phy_cfg_lane_e82x(hw, port);
2529
2530 err = ice_phy_cfg_uix_e82x(hw, port);
2531 if (err)
2532 return err;
2533
2534 err = ice_phy_cfg_parpcs_e82x(hw, port);
2535 if (err)
2536 return err;
2537
2538 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2539 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2540 incval = (u64)hi << 32 | lo;
2541
2542 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
2543 if (err)
2544 return err;
2545
2546 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2547 if (err)
2548 return err;
2549
2550 /* Do not perform any action on the main timer */
2551 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2552
2553 ice_ptp_exec_tmr_cmd(hw);
2554
2555 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2556 if (err)
2557 return err;
2558
2559 val |= P_REG_PS_SFT_RESET_M;
2560 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2561 if (err)
2562 return err;
2563
2564 val |= P_REG_PS_START_M;
2565 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2566 if (err)
2567 return err;
2568
2569 val &= ~P_REG_PS_SFT_RESET_M;
2570 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2571 if (err)
2572 return err;
2573
2574 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2575 if (err)
2576 return err;
2577
2578 ice_ptp_exec_tmr_cmd(hw);
2579
2580 val |= P_REG_PS_ENA_CLK_M;
2581 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2582 if (err)
2583 return err;
2584
2585 val |= P_REG_PS_LOAD_OFFSET_M;
2586 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2587 if (err)
2588 return err;
2589
2590 ice_ptp_exec_tmr_cmd(hw);
2591
2592 err = ice_sync_phy_timer_e82x(hw, port);
2593 if (err)
2594 return err;
2595
2596 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2597
2598 return 0;
2599}
2600
2601/**
2602 * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
2603 * @hw: pointer to the HW struct
2604 * @quad: the timestamp quad to read from
2605 * @tstamp_ready: contents of the Tx memory status register
2606 *
2607 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2608 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2609 * ready to be captured from the PHY timestamp block.
2610 */
2611static int
2612ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2613{
2614 u32 hi, lo;
2615 int err;
2616
2617 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2618 if (err) {
2619 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2620 quad, err);
2621 return err;
2622 }
2623
2624 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2625 if (err) {
2626 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2627 quad, err);
2628 return err;
2629 }
2630
2631 *tstamp_ready = (u64)hi << 32 | (u64)lo;
2632
2633 return 0;
2634}
2635
2636/* E810 functions
2637 *
2638 * The following functions operate on the E810 series devices which use
2639 * a separate external PHY.
2640 */
2641
2642/**
2643 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2644 * @hw: pointer to the HW struct
2645 * @addr: the address to read from
2646 * @val: On return, the value read from the PHY
2647 *
2648 * Read a register from the external PHY on the E810 device.
2649 */
2650static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2651{
2652 struct ice_sbq_msg_input msg = {0};
2653 int err;
2654
2655 msg.msg_addr_low = lower_16_bits(addr);
2656 msg.msg_addr_high = upper_16_bits(addr);
2657 msg.opcode = ice_sbq_msg_rd;
2658 msg.dest_dev = rmn_0;
2659
2660 err = ice_sbq_rw_reg(hw, &msg);
2661 if (err) {
2662 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2663 err);
2664 return err;
2665 }
2666
2667 *val = msg.data;
2668
2669 return 0;
2670}
2671
2672/**
2673 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2674 * @hw: pointer to the HW struct
2675 * @addr: the address to write to
2676 * @val: the value to write to the PHY
2677 *
2678 * Write a value to a register of the external PHY on the E810 device.
2679 */
2680static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2681{
2682 struct ice_sbq_msg_input msg = {0};
2683 int err;
2684
2685 msg.msg_addr_low = lower_16_bits(addr);
2686 msg.msg_addr_high = upper_16_bits(addr);
2687 msg.opcode = ice_sbq_msg_wr;
2688 msg.dest_dev = rmn_0;
2689 msg.data = val;
2690
2691 err = ice_sbq_rw_reg(hw, &msg);
2692 if (err) {
2693 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2694 err);
2695 return err;
2696 }
2697
2698 return 0;
2699}
2700
2701/**
2702 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2703 * @hw: pointer to the HW struct
2704 * @idx: the timestamp index to read
2705 * @hi: 8 bit timestamp high value
2706 * @lo: 32 bit timestamp low value
2707 *
2708 * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
2709 * timestamp block of the external PHY on the E810 device using the low latency
2710 * timestamp read.
2711 */
2712static int
2713ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2714{
2715 u32 val;
2716 u8 i;
2717
2718	/* Write the TS index to read into the PF register so the FW can fetch it */
2719 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2720 wr32(hw, PF_SB_ATQBAL, val);
2721
2722 /* Read the register repeatedly until the FW provides us the TS */
2723 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2724 val = rd32(hw, PF_SB_ATQBAL);
2725
2726 /* When the bit is cleared, the TS is ready in the register */
2727 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2728 /* High 8 bit value of the TS is on the bits 16:23 */
2729 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2730
2731 /* Read the low 32 bit value and set the TS valid bit */
2732 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2733 return 0;
2734 }
2735
2736 udelay(10);
2737 }
2738
2739 /* FW failed to provide the TS in time */
2740 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2741 return -EINVAL;
2742}
2743
2744/**
2745 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2746 * @hw: pointer to the HW struct
2747 * @lport: the lport to read from
2748 * @idx: the timestamp index to read
2749 * @hi: 8 bit timestamp high value
2750 * @lo: 32 bit timestamp low value
2751 *
2752 * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
2753 * timestamp block of the external PHY on the E810 device using sideband queue.
2754 */
2755static int
2756ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2757 u32 *lo)
2758{
2759 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2760 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2761 u32 lo_val, hi_val;
2762 int err;
2763
2764 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2765 if (err) {
2766 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2767 err);
2768 return err;
2769 }
2770
2771 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2772 if (err) {
2773 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2774 err);
2775 return err;
2776 }
2777
2778 *lo = lo_val;
2779 *hi = (u8)hi_val;
2780
2781 return 0;
2782}
2783
2784/**
2785 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2786 * @hw: pointer to the HW struct
2787 * @lport: the lport to read from
2788 * @idx: the timestamp index to read
2789 * @tstamp: on return, the 40bit timestamp value
2790 *
2791 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2792 * on the E810 device.
2793 */
2794static int
2795ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2796{
2797 u32 lo = 0;
2798 u8 hi = 0;
2799 int err;
2800
2801 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2802 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2803 else
2804 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2805
2806 if (err)
2807 return err;
2808
2809 /* For E810 devices, the timestamp is reported with the lower 32 bits
2810 * in the low register, and the upper 8 bits in the high register.
2811 */
2812 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
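	/* For example: hi = 0x1A and lo = 0x89ABCDEF combine into the 40-bit
	 * timestamp 0x1A89ABCDEF, assuming the usual 32-bit shift and 32-bit
	 * low mask encoded by TS_HIGH_S and TS_LOW_M.
	 */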
2813
2814 return 0;
2815}
2816
2817/**
2818 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2819 * @hw: pointer to the HW struct
2820 * @lport: the lport to read from
2821 * @idx: the timestamp index to reset
2822 *
2823 * Read the timestamp and then forcibly overwrite its value to clear the valid
2824 * bit from the timestamp block of the external PHY on the E810 device.
2825 *
2826 * This function should only be called on an idx whose bit is set according to
2827 * ice_get_phy_tx_tstamp_ready().
2828 */
2829static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2830{
2831 u32 lo_addr, hi_addr;
2832 u64 unused_tstamp;
2833 int err;
2834
2835 err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
2836 if (err) {
2837 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
2838 lport, idx, err);
2839 return err;
2840 }
2841
2842 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2843 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2844
2845 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2846 if (err) {
2847 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
2848 lport, idx, err);
2849 return err;
2850 }
2851
2852 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2853 if (err) {
2854 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
2855 lport, idx, err);
2856 return err;
2857 }
2858
2859 return 0;
2860}
2861
2862/**
2863 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2864 * @hw: pointer to HW struct
2865 *
2866 * Enable the timesync PTP functionality for the external PHY connected to
2867 * this function.
2868 */
2869int ice_ptp_init_phy_e810(struct ice_hw *hw)
2870{
2871 u8 tmr_idx;
2872 int err;
2873
2874 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2875 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2876 GLTSYN_ENA_TSYN_ENA_M);
2877 if (err)
2878 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2879 err);
2880
2881 return err;
2882}
2883
2884/**
2885 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2886 * @hw: pointer to HW struct
2887 *
2888 * Perform E810-specific PTP hardware clock initialization steps.
2889 */
2890static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2891{
2892 /* Ensure synchronization delay is zero */
2893 wr32(hw, GLTSYN_SYNC_DLAY, 0);
2894
2895 /* Initialize the PHY */
2896 return ice_ptp_init_phy_e810(hw);
2897}
2898
2899/**
2900 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2901 * @hw: Board private structure
2902 * @time: Time to initialize the PHY port clock to
2903 *
2904 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for
2905 * setting the initial clock time. The time will not actually be programmed
2906 * until the driver issues an ICE_PTP_INIT_TIME command.
2907 *
2908 * The time value is the upper 32 bits of the PHY timer, usually in units of
2909 * nominal nanoseconds.
2910 */
2911static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2912{
2913 u8 tmr_idx;
2914 int err;
2915
2916 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2917 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2918 if (err) {
2919 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2920 err);
2921 return err;
2922 }
2923
2924 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2925 if (err) {
2926 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2927 err);
2928 return err;
2929 }
2930
2931 return 0;
2932}
2933
2934/**
2935 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2936 * @hw: pointer to HW struct
2937 * @adj: adjustment value to program
2938 *
2939 * Prepare the PHY port for an atomic adjustment by programming the PHY
2940 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2941 * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
2942 *
2943 * The adjustment value only contains the portion used for the upper 32 bits of
2944 * the PHY timer, usually in units of nominal nanoseconds. Negative
2945 * adjustments are supported using 2's complement arithmetic.
2946 */
2947static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2948{
2949 u8 tmr_idx;
2950 int err;
2951
2952 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2953
2954 /* Adjustments are represented as signed 2's complement values in
2955 * nanoseconds. Sub-nanosecond adjustment is not supported.
2956 */
2957 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2958 if (err) {
2959 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2960 err);
2961 return err;
2962 }
2963
2964 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2965 if (err) {
2966 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2967 err);
2968 return err;
2969 }
2970
2971 return 0;
2972}
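
/* Worked example (illustrative only): a -1000 ns adjustment is encoded in
 * SHADJ_H as its 32-bit two's complement representation:
 *
 *	s32 adj = -1000;
 *	u32 reg = (u32)adj;	-> 0xFFFFFC18
 *
 * The hardware interprets the register as signed, so the same call handles
 * both positive and negative adjustments.
 */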
2973
2974/**
2975 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2976 * @hw: pointer to HW struct
2977 * @incval: The new 40bit increment value to prepare
2978 *
2979 * Prepare the PHY port for a new increment value by programming the PHY
2980 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2981 * completed by issuing an ICE_PTP_INIT_INCVAL command.
2982 */
2983static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2984{
2985 u32 high, low;
2986 u8 tmr_idx;
2987 int err;
2988
2989 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2990 low = lower_32_bits(incval);
2991 high = upper_32_bits(incval);
2992
2993 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2994 if (err) {
2995 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2996 err);
2997 return err;
2998 }
2999
3000 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
3001 if (err) {
3002 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_H, err %d\n",
3003 err);
3004 return err;
3005 }
3006
3007 return 0;
3008}
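
/* Illustrative sketch: assuming a nominal E810 increment of 0x100000000
 * (1 ns per clock cycle in 32.32 fixed-point notation), the value is split
 * across the shadow registers as:
 *
 *	u64 incval = 0x100000000ULL;
 *	u32 low = lower_32_bits(incval);	-> 0x00000000 (SHADJ_L)
 *	u32 high = upper_32_bits(incval);	-> 0x00000001 (SHADJ_H)
 */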
3009
3010/**
3011 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
3012 * @hw: pointer to HW struct
3013 * @cmd: Command to be sent to the port
3014 *
3015 * Prepare the external PHYs connected to this device for a timer sync
3016 * command.
3017 */
3018static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3019{
3020 u32 cmd_val, val;
3021 int err;
3022
3023 switch (cmd) {
3024 case ICE_PTP_INIT_TIME:
3025 cmd_val = GLTSYN_CMD_INIT_TIME;
3026 break;
3027 case ICE_PTP_INIT_INCVAL:
3028 cmd_val = GLTSYN_CMD_INIT_INCVAL;
3029 break;
3030 case ICE_PTP_ADJ_TIME:
3031 cmd_val = GLTSYN_CMD_ADJ_TIME;
3032 break;
3033 case ICE_PTP_READ_TIME:
3034 cmd_val = GLTSYN_CMD_READ_TIME;
3035 break;
3036 case ICE_PTP_ADJ_TIME_AT_TIME:
3037 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
3038 break;
3039 case ICE_PTP_NOP:
3040 return 0;
3041 }
3042
3043 /* Read, modify, write */
3044 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
3045 if (err) {
3046 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
3047 return err;
3048 }
3049
3050 /* Modify necessary bits only and perform write */
3051 val &= ~TS_CMD_MASK_E810;
3052 val |= cmd_val;
3053
3054 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
3055 if (err) {
3056 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
3057 return err;
3058 }
3059
3060 return 0;
3061}
3062
3063/**
3064 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
3065 * @hw: pointer to the HW struct
3066 * @port: the PHY port to read
3067 * @tstamp_ready: contents of the Tx memory status register
3068 *
3069 * E810 devices do not use a Tx memory status register. Instead simply
3070 * indicate that all timestamps are currently ready.
3071 */
3072static int
3073ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
3074{
3075 *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
3076 return 0;
3077}
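
/* Usage sketch (illustrative only): callers typically fetch the ready bitmap
 * and then read each completed timestamp:
 *
 *	u64 ready, raw;
 *	u8 idx;
 *
 *	if (!ice_get_phy_tx_tstamp_ready(hw, block, &ready))
 *		for (idx = 0; idx < 64; idx++)
 *			if (ready & BIT_ULL(idx))
 *				ice_read_phy_tstamp(hw, block, idx, &raw);
 *
 * Because E810 reports every bit as set, callers typically also check the
 * valid bit carried in the timestamp itself before using it.
 */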
3078
3079/* E810T SMA functions
3080 *
3081 * The following functions operate specifically on E810T hardware and are used
3082 * to access the extended GPIOs available.
3083 */
3084
3085/**
3086 * ice_get_pca9575_handle - Find the handle of the PCA9575 GPIO controller
3087 * @hw: pointer to the hw struct
3088 * @pca9575_handle: GPIO controller's handle
3089 *
3090 * Find and return the GPIO controller's handle in the netlist. Once found,
3091 * the value is cached in the hw structure and subsequent calls return the
3092 * cached value.
3093 */
3094static int
3095ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3096{
3097 struct ice_aqc_get_link_topo *cmd;
3098 struct ice_aq_desc desc;
3099 int status;
3100 u8 idx;
3101
3102 /* If handle was read previously return cached value */
3103 if (hw->io_expander_handle) {
3104 *pca9575_handle = hw->io_expander_handle;
3105 return 0;
3106 }
3107
3108 /* If handle was not detected read it from the netlist */
3109 cmd = &desc.params.get_link_topo;
3110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3111
3112 /* Set node type to GPIO controller */
3113 cmd->addr.topo_params.node_type_ctx =
3114 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3115 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3116
3117#define SW_PCA9575_SFP_TOPO_IDX 2
3118#define SW_PCA9575_QSFP_TOPO_IDX 1
3119
3120 /* Check if the SW IO expander controlling SMA exists in the netlist. */
3121 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3122 idx = SW_PCA9575_SFP_TOPO_IDX;
3123 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3124 idx = SW_PCA9575_QSFP_TOPO_IDX;
3125 else
3126 return -EOPNOTSUPP;
3127
3128 cmd->addr.topo_params.index = idx;
3129
3130 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3131 if (status)
3132 return -EOPNOTSUPP;
3133
3134 /* Verify if we found the right IO expander type */
3135 if (desc.params.get_link_topo.node_part_num !=
3136 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3137 return -EOPNOTSUPP;
3138
3139 /* If present save the handle and return it */
3140 hw->io_expander_handle =
3141 le16_to_cpu(desc.params.get_link_topo.addr.handle);
3142 *pca9575_handle = hw->io_expander_handle;
3143
3144 return 0;
3145}
3146
3147/**
3148 * ice_read_sma_ctrl_e810t - Read the SMA controller state
3149 * @hw: pointer to the hw struct
3150 * @data: pointer to data to be read from the GPIO controller
3151 *
3152 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3153 * PCA9575 expander, so only bits 3-7 in data are valid.
3154 */
3155int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3156{
3157 int status;
3158 u16 handle;
3159 u8 i;
3160
3161 status = ice_get_pca9575_handle(hw, &handle);
3162 if (status)
3163 return status;
3164
3165 *data = 0;
3166
3167 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3168 bool pin;
3169
3170 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3171 &pin, NULL);
3172 if (status)
3173 break;
3174 *data |= (u8)(!pin) << i;
3175 }
3176
3177 return status;
3178}
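
/* Usage sketch (illustrative only; the SMA pin mask macros are assumed from
 * the E810-T definitions): a read-modify-write of the SMA1 control bits
 * could look like:
 *
 *	u8 sma_ctrl;
 *
 *	if (!ice_read_sma_ctrl_e810t(hw, &sma_ctrl)) {
 *		sma_ctrl &= ~ICE_SMA1_MASK_E810T;
 *		sma_ctrl |= ICE_SMA1_TX_EN_E810T;
 *		ice_write_sma_ctrl_e810t(hw, sma_ctrl);
 *	}
 */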
3179
3180/**
3181 * ice_write_sma_ctrl_e810t - Write the SMA controller state
3182 * @hw: pointer to the hw struct
3183 * @data: data to be written to the GPIO controller
3184 *
3185 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3186 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3187 */
3188int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3189{
3190 int status;
3191 u16 handle;
3192 u8 i;
3193
3194 status = ice_get_pca9575_handle(hw, &handle);
3195 if (status)
3196 return status;
3197
3198 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3199 bool pin;
3200
3201 pin = !(data & (1 << i));
3202 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3203 pin, NULL);
3204 if (status)
3205 break;
3206 }
3207
3208 return status;
3209}
3210
3211/**
3212 * ice_read_pca9575_reg_e810t - Read a register of the PCA9575 GPIO controller
3213 * @hw: pointer to the hw struct
3214 * @offset: GPIO controller register offset
3215 * @data: pointer to data to be read from the GPIO controller
3216 *
3217 * Read the register from the GPIO controller
3218 */
3219int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3220{
3221 struct ice_aqc_link_topo_addr link_topo;
3222 __le16 addr;
3223 u16 handle;
3224 int err;
3225
3226 memset(&link_topo, 0, sizeof(link_topo));
3227
3228 err = ice_get_pca9575_handle(hw, &handle);
3229 if (err)
3230 return err;
3231
3232 link_topo.handle = cpu_to_le16(handle);
3233 link_topo.topo_params.node_type_ctx =
3234 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3235 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3236
3237 addr = cpu_to_le16((u16)offset);
3238
3239 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3240}
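
/* Usage sketch (ICE_PCA9575_P1_IN is a hypothetical register offset used
 * purely for illustration):
 *
 *	u8 val;
 *
 *	if (!ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P1_IN, &val))
 *		ice_debug(hw, ICE_DBG_PTP, "PCA9575 P1 inputs: 0x%02x\n", val);
 */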
3241
3242/* Device agnostic functions
3243 *
3244 * The following functions implement shared behavior common to both E822 and
3245 * E810 devices, possibly calling a device specific implementation where
3246 * necessary.
3247 */
3248
3249/**
3250 * ice_ptp_lock - Acquire PTP global semaphore register lock
3251 * @hw: pointer to the HW struct
3252 *
3253 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
3254 * was acquired, false otherwise.
3255 *
3256 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
3257 * value. If software sees the busy bit cleared, this means that this function
3258 * acquired the lock (and the busy bit is now set). If software sees the busy
3259 * bit set, it means that another function acquired the lock.
3260 *
3261 * Software must clear the busy bit with a write to release the lock for other
3262 * functions when done.
3263 */
3264bool ice_ptp_lock(struct ice_hw *hw)
3265{
3266 u32 hw_lock;
3267 int i;
3268
3269#define MAX_TRIES 15
3270
3271 for (i = 0; i < MAX_TRIES; i++) {
3272 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
3273 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
3274 if (hw_lock) {
3275 /* Somebody is holding the lock */
3276 usleep_range(5000, 6000);
3277 continue;
3278 }
3279
3280 break;
3281 }
3282
3283 return !hw_lock;
3284}
3285
3286/**
3287 * ice_ptp_unlock - Release PTP global semaphore register lock
3288 * @hw: pointer to the HW struct
3289 *
3290 * Release the global PTP hardware semaphore lock. This is done by writing to
3291 * the PFTSYN_SEM register.
3292 */
3293void ice_ptp_unlock(struct ice_hw *hw)
3294{
3295 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
3296}
3297
3298/**
3299 * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
3300 * @hw: pointer to the HW structure
3301 *
3302 * Determine the PHY model for the device, and initialize hw->phy_model
3303 * for use by other functions.
3304 */
3305void ice_ptp_init_phy_model(struct ice_hw *hw)
3306{
3307 if (ice_is_e810(hw))
3308 hw->phy_model = ICE_PHY_E810;
3309 else
3310 hw->phy_model = ICE_PHY_E82X;
3311}
3312
3313/**
3314 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
3315 * @hw: pointer to HW struct
3316 * @cmd: the command to issue
3317 *
3318 * Prepare the source timer and PHY timers and then trigger the requested
3319 * command. This causes the shadow registers previously written in preparation
3320 * for the command to be synchronously applied to both the source and PHY
3321 * timers.
3322 */
3323static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3324{
3325 int err;
3326
3327 /* First, prepare the source timer */
3328 ice_ptp_src_cmd(hw, cmd);
3329
3330 /* Next, prepare the ports */
3331 switch (hw->phy_model) {
3332 case ICE_PHY_E810:
3333 err = ice_ptp_port_cmd_e810(hw, cmd);
3334 break;
3335 case ICE_PHY_E82X:
3336 err = ice_ptp_port_cmd_e82x(hw, cmd);
3337 break;
3338 default:
3339 err = -EOPNOTSUPP;
3340 }
3341
3342 if (err) {
3343 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3344 cmd, err);
3345 return err;
3346 }
3347
3348 /* Write the sync command register to drive both source and PHY timer
3349 * commands synchronously
3350 */
3351 ice_ptp_exec_tmr_cmd(hw);
3352
3353 return 0;
3354}
3355
3356/**
3357 * ice_ptp_init_time - Initialize device time to provided value
3358 * @hw: pointer to HW struct
3359 * @time: 64 bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3360 *
3361 * Initialize the device clock to the provided time. This requires a
3362 * three-step process:
3363 *
3364 * 1) write the new init time to the source timer shadow registers
3365 * 2) write the new init time to the PHY timer shadow registers
3366 * 3) issue an init_time timer command to synchronously switch both the source
3367 * and port timers to the new init time value at the next clock cycle.
3368 */
3369int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3370{
3371 u8 tmr_idx;
3372 int err;
3373
3374 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3375
3376 /* Source timers */
3377 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3378 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3379 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3380
3381 /* PHY timers */
3382 /* Fill Rx and Tx ports and send msg to PHY */
3383 switch (hw->phy_model) {
3384 case ICE_PHY_E810:
3385 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3386 break;
3387 case ICE_PHY_E82X:
3388 err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
3389 break;
3390 default:
3391 err = -EOPNOTSUPP;
3392 }
3393
3394 if (err)
3395 return err;
3396
3397 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
3398}
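
/* Usage sketch (illustrative only): the clock-owning PF typically programs
 * the initial time while holding the PTP hardware semaphore:
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	err = ice_ptp_init_time(hw, ktime_get_real_ns());
 *	ice_ptp_unlock(hw);
 */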
3399
3400/**
3401 * ice_ptp_write_incval - Program PHC with new increment value
3402 * @hw: pointer to HW struct
3403 * @incval: Source timer increment value per clock cycle
3404 *
3405 * Program the PHC with a new increment value. This requires a three-step
3406 * process:
3407 *
3408 * 1) Write the increment value to the source timer shadow registers
3409 * 2) Write the increment value to the PHY timer shadow registers
3410 * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
3411 * the source and port timers to the new increment value at the next clock
3412 * cycle.
3413 */
3414int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3415{
3416 u8 tmr_idx;
3417 int err;
3418
3419 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3420
3421 /* Shadow Adjust */
3422 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3423 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3424
3425 switch (hw->phy_model) {
3426 case ICE_PHY_E810:
3427 err = ice_ptp_prep_phy_incval_e810(hw, incval);
3428 break;
3429 case ICE_PHY_E82X:
3430 err = ice_ptp_prep_phy_incval_e82x(hw, incval);
3431 break;
3432 default:
3433 err = -EOPNOTSUPP;
3434 }
3435
3436 if (err)
3437 return err;
3438
3439 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
3440}
3441
3442/**
3443 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3444 * @hw: pointer to HW struct
3445 * @incval: Source timer increment value per clock cycle
3446 *
3447 * Program a new PHC incval while holding the PTP semaphore.
3448 */
3449int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3450{
3451 int err;
3452
3453 if (!ice_ptp_lock(hw))
3454 return -EBUSY;
3455
3456 err = ice_ptp_write_incval(hw, incval);
3457
3458 ice_ptp_unlock(hw);
3459
3460 return err;
3461}
3462
3463/**
3464 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3465 * @hw: pointer to HW struct
3466 * @adj: Adjustment in nanoseconds
3467 *
3468 * Perform an atomic adjustment of the PHC time by the specified number of
3469 * nanoseconds. This requires a three-step process:
3470 *
3471 * 1) Write the adjustment to the source timer shadow registers
3472 * 2) Write the adjustment to the PHY timer shadow registers
3473 * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
3474 * adjustment to both the source and port timers at the next clock cycle.
3475 */
3476int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3477{
3478 u8 tmr_idx;
3479 int err;
3480
3481 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3482
3483 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3484 * For an ICE_PTP_ADJ_TIME command, this set of registers represents
3485 * the value to add to the clock time. It supports subtraction by
3486 * interpreting the value as a 2's complement integer.
3487 */
3488 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3489 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3490
3491 switch (hw->phy_model) {
3492 case ICE_PHY_E810:
3493 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3494 break;
3495 case ICE_PHY_E82X:
3496 err = ice_ptp_prep_phy_adj_e82x(hw, adj);
3497 break;
3498 default:
3499 err = -EOPNOTSUPP;
3500 }
3501
3502 if (err)
3503 return err;
3504
3505 return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
3506}
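
/* Usage sketch (illustrative only): a small phase correction is issued under
 * the PTP semaphore with a signed nanosecond delta:
 *
 *	s32 delta_ns = -250;
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	err = ice_ptp_adj_clock(hw, delta_ns);
 *	ice_ptp_unlock(hw);
 */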
3507
3508/**
3509 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3510 * @hw: pointer to the HW struct
3511 * @block: the block to read from
3512 * @idx: the timestamp index to read
3513 * @tstamp: on return, the 40bit timestamp value
3514 *
3515 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3516 * the block is the quad to read from. For E810 devices, the block is the
3517 * logical port to read from.
3518 */
3519int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3520{
3521 switch (hw->phy_model) {
3522 case ICE_PHY_E810:
3523 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3524 case ICE_PHY_E82X:
3525 return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
3526 default:
3527 return -EOPNOTSUPP;
3528 }
3529}
3530
3531/**
3532 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3533 * @hw: pointer to the HW struct
3534 * @block: the block to read from
3535 * @idx: the timestamp index to reset
3536 *
3537 * Clear a timestamp from the timestamp block, discarding its value without
3538 * returning it. This resets the memory status bit for the timestamp index
3539 * allowing it to be reused for another timestamp in the future.
3540 *
3541 * For E822 devices, the block number is the PHY quad to clear from. For E810
3542 * devices, the block number is the logical port to clear from.
3543 *
3544 * This function must only be called on a timestamp index whose valid bit is
3545 * set according to ice_get_phy_tx_tstamp_ready().
3546 */
3547int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3548{
3549 switch (hw->phy_model) {
3550 case ICE_PHY_E810:
3551 return ice_clear_phy_tstamp_e810(hw, block, idx);
3552 case ICE_PHY_E82X:
3553 return ice_clear_phy_tstamp_e82x(hw, block, idx);
3554 default:
3555 return -EOPNOTSUPP;
3556 }
3557}
3558
3559/**
3560 * ice_get_pf_c827_idx - find and return the C827 index for the current PF
3561 * @hw: pointer to the hw struct
3562 * @idx: index of the found C827 PHY
3563 * Return:
3564 * * 0 - success
3565 * * negative - failure
3566 */
3567static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
3568{
3569 struct ice_aqc_get_link_topo cmd;
3570 u8 node_part_number;
3571 u16 node_handle;
3572 int status;
3573 u8 ctx;
3574
3575 if (hw->mac_type != ICE_MAC_E810)
3576 return -ENODEV;
3577
3578 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
3579 *idx = C827_0;
3580 return 0;
3581 }
3582
3583 memset(&cmd, 0, sizeof(cmd));
3584
3585 ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
3586 ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
3587 cmd.addr.topo_params.node_type_ctx = ctx;
3588
3589 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
3590 &node_handle);
3591 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
3592 return -ENOENT;
3593
3594 if (node_handle == E810C_QSFP_C827_0_HANDLE)
3595 *idx = C827_0;
3596 else if (node_handle == E810C_QSFP_C827_1_HANDLE)
3597 *idx = C827_1;
3598 else
3599 return -EIO;
3600
3601 return 0;
3602}
3603
3604/**
3605 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
3606 * @hw: pointer to the HW struct
3607 */
3608void ice_ptp_reset_ts_memory(struct ice_hw *hw)
3609{
3610 switch (hw->phy_model) {
3611 case ICE_PHY_E82X:
3612 ice_ptp_reset_ts_memory_e82x(hw);
3613 break;
3614 case ICE_PHY_E810:
3615 default:
3616 return;
3617 }
3618}
3619
3620/**
3621 * ice_ptp_init_phc - Initialize PTP hardware clock
3622 * @hw: pointer to the HW struct
3623 *
3624 * Perform the steps required to initialize the PTP hardware clock.
3625 */
3626int ice_ptp_init_phc(struct ice_hw *hw)
3627{
3628 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3629
3630 /* Enable source clocks */
3631 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3632
3633 /* Clear event err indications for auxiliary pins */
3634 (void)rd32(hw, GLTSYN_STAT(src_idx));
3635
3636 switch (hw->phy_model) {
3637 case ICE_PHY_E810:
3638 return ice_ptp_init_phc_e810(hw);
3639 case ICE_PHY_E82X:
3640 return ice_ptp_init_phc_e82x(hw);
3641 default:
3642 return -EOPNOTSUPP;
3643 }
3644}
3645
3646/**
3647 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
3648 * @hw: pointer to the HW struct
3649 * @block: the timestamp block to check
3650 * @tstamp_ready: storage for the PHY Tx memory status information
3651 *
3652 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
3653 * which indicates which timestamps in the block may be captured. A set bit
3654 * means the timestamp can be read. An unset bit means the timestamp is not
3655 * ready and software should avoid reading the register.
3656 */
3657int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
3658{
3659 switch (hw->phy_model) {
3660 case ICE_PHY_E810:
3661 return ice_get_phy_tx_tstamp_ready_e810(hw, block,
3662 tstamp_ready);
3663 case ICE_PHY_E82X:
3664 return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
3665 tstamp_ready);
3667 default:
3668 return -EOPNOTSUPP;
3669 }
3670}
3671
3672/**
3673 * ice_cgu_get_pin_desc_e823 - get pin description array
3674 * @hw: pointer to the hw struct
3675 * @input: if request is done against input or output pin
3676 * @size: number of inputs/outputs
3677 *
3678 * Return: pointer to the pin description array associated with the given hw.
3679 */
3680static const struct ice_cgu_pin_desc *
3681ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
3682{
3683 static const struct ice_cgu_pin_desc *t;
3684
3685 if (hw->cgu_part_number ==
3686 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
3687 if (input) {
3688 t = ice_e823_zl_cgu_inputs;
3689 *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
3690 } else {
3691 t = ice_e823_zl_cgu_outputs;
3692 *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
3693 }
3694 } else if (hw->cgu_part_number ==
3695 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
3696 if (input) {
3697 t = ice_e823_si_cgu_inputs;
3698 *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
3699 } else {
3700 t = ice_e823_si_cgu_outputs;
3701 *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
3702 }
3703 } else {
3704 t = NULL;
3705 *size = 0;
3706 }
3707
3708 return t;
3709}
3710
3711/**
3712 * ice_cgu_get_pin_desc - get pin description array
3713 * @hw: pointer to the hw struct
3714 * @input: if request is done against input or output pins
3715 * @size: size of array returned by function
3716 *
3717 * Return: pointer to the pin description array associated with the given hw.
3718 */
3719static const struct ice_cgu_pin_desc *
3720ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
3721{
3722 const struct ice_cgu_pin_desc *t = NULL;
3723
3724 switch (hw->device_id) {
3725 case ICE_DEV_ID_E810C_SFP:
3726 if (input) {
3727 t = ice_e810t_sfp_cgu_inputs;
3728 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
3729 } else {
3730 t = ice_e810t_sfp_cgu_outputs;
3731 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
3732 }
3733 break;
3734 case ICE_DEV_ID_E810C_QSFP:
3735 if (input) {
3736 t = ice_e810t_qsfp_cgu_inputs;
3737 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
3738 } else {
3739 t = ice_e810t_qsfp_cgu_outputs;
3740 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
3741 }
3742 break;
3743 case ICE_DEV_ID_E823L_10G_BASE_T:
3744 case ICE_DEV_ID_E823L_1GBE:
3745 case ICE_DEV_ID_E823L_BACKPLANE:
3746 case ICE_DEV_ID_E823L_QSFP:
3747 case ICE_DEV_ID_E823L_SFP:
3748 case ICE_DEV_ID_E823C_10G_BASE_T:
3749 case ICE_DEV_ID_E823C_BACKPLANE:
3750 case ICE_DEV_ID_E823C_QSFP:
3751 case ICE_DEV_ID_E823C_SFP:
3752 case ICE_DEV_ID_E823C_SGMII:
3753 t = ice_cgu_get_pin_desc_e823(hw, input, size);
3754 break;
3755 default:
3756 break;
3757 }
3758
3759 return t;
3760}
3761
3762/**
3763 * ice_cgu_get_pin_type - get pin's type
3764 * @hw: pointer to the hw struct
3765 * @pin: pin index
3766 * @input: if request is done against input or output pin
3767 *
3768 * Return: type of a pin.
3769 */
3770enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
3771{
3772 const struct ice_cgu_pin_desc *t;
3773 int t_size;
3774
3775 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3776
3777 if (!t)
3778 return 0;
3779
3780 if (pin >= t_size)
3781 return 0;
3782
3783 return t[pin].type;
3784}
3785
3786/**
3787 * ice_cgu_get_pin_freq_supp - get pin's supported frequency
3788 * @hw: pointer to the hw struct
3789 * @pin: pin index
3790 * @input: if request is done against input or output pin
3791 * @num: output number of supported frequencies
3792 *
3793 * Get the number of supported frequencies and the array that lists them.
3794 *
3795 * Return: array of supported frequencies for the given pin.
3796 */
3797struct dpll_pin_frequency *
3798ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
3799{
3800 const struct ice_cgu_pin_desc *t;
3801 int t_size;
3802
3803 *num = 0;
3804 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3805 if (!t)
3806 return NULL;
3807 if (pin >= t_size)
3808 return NULL;
3809 *num = t[pin].freq_supp_num;
3810
3811 return t[pin].freq_supp;
3812}
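
/* Usage sketch (illustrative only; the min/max fields are assumed from
 * struct dpll_pin_frequency in the DPLL core):
 *
 *	struct dpll_pin_frequency *freq;
 *	u8 num, i;
 *
 *	freq = ice_cgu_get_pin_freq_supp(hw, pin, true, &num);
 *	for (i = 0; freq && i < num; i++)
 *		ice_debug(hw, ICE_DBG_PTP, "pin %u: %llu-%llu Hz\n",
 *			  pin, freq[i].min, freq[i].max);
 */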
3813
3814/**
3815 * ice_cgu_get_pin_name - get pin's name
3816 * @hw: pointer to the hw struct
3817 * @pin: pin index
3818 * @input: if request is done against input or output pin
3819 *
3820 * Return:
3821 * * null-terminated string with the pin name
3822 * * NULL in case of failure
3823 */
3824const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
3825{
3826 const struct ice_cgu_pin_desc *t;
3827 int t_size;
3828
3829 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3830
3831 if (!t)
3832 return NULL;
3833
3834 if (pin >= t_size)
3835 return NULL;
3836
3837 return t[pin].name;
3838}
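
/* Usage sketch (illustrative only): dumping every CGU input pin known for
 * the current board:
 *
 *	const struct ice_cgu_pin_desc *t;
 *	int i, size;
 *
 *	t = ice_cgu_get_pin_desc(hw, true, &size);
 *	for (i = 0; t && i < size; i++)
 *		ice_debug(hw, ICE_DBG_PTP, "input %d: %s (type %d)\n",
 *			  i, t[i].name, t[i].type);
 */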
3839
3840/**
3841 * ice_get_cgu_state - get the state of the DPLL
3842 * @hw: pointer to the hw struct
3843 * @dpll_idx: Index of internal DPLL unit
3844 * @last_dpll_state: last known state of DPLL
3845 * @pin: pointer to a buffer for returning currently active pin
3846 * @ref_state: reference clock state
3847 * @eec_mode: eec mode of the DPLL
3848 * @phase_offset: pointer to a buffer for returning phase offset
3849 * @dpll_state: state of the DPLL (output)
3850 *
3851 * This function reads the state of the DPLL (dpll_idx). Non-NULL 'pin',
3852 * 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to retrieve
3853 * the currently active pin, reference state, EEC mode and phase offset.
3854 *
3855 * Return: 0 on success, negative error code otherwise.
3856 */
3857int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
3858 enum dpll_lock_status last_dpll_state, u8 *pin,
3859 u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
3860 enum dpll_lock_status *dpll_state)
3861{
3862 u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
3863 s64 hw_phase_offset;
3864 int status;
3865
3866 status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
3867 &hw_dpll_state, &hw_config,
3868 &hw_phase_offset, &hw_eec_mode);
3869 if (status)
3870 return status;
3871
3872 if (pin)
3873 /* current ref pin in dpll_state_refsel_status_X register */
3874 *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
3875 if (phase_offset)
3876 *phase_offset = hw_phase_offset;
3877 if (ref_state)
3878 *ref_state = hw_ref_state;
3879 if (eec_mode)
3880 *eec_mode = hw_eec_mode;
3881 if (!dpll_state)
3882 return 0;
3883
3884	/* According to the ZL DPLL documentation, once the state reaches
3885	 * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with the
3886	 * ITU-T G.781 Recommendation. We cannot report HOLDOVER since the HO
3887	 * memory is cleared while switching to another reference.
3888	 * Only when the previous state was either "LOCKED without HO_ACQ" or
3889	 * "HOLDOVER" do we actually fall back to FREERUN.
3890 */
3891 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
3892 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
3893 *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
3894 else
3895 *dpll_state = DPLL_LOCK_STATUS_LOCKED;
3896 } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
3897 last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
3898 *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
3899 } else {
3900 *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
3901 }
3902
3903 return 0;
3904}
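
/* Usage sketch (illustrative only; dpll_idx 0 is a placeholder): the caller
 * keeps the previous lock state to feed the holdover heuristic above:
 *
 *	enum dpll_lock_status prev = DPLL_LOCK_STATUS_UNLOCKED, state;
 *	u8 pin, ref_state, eec_mode, dpll_idx = 0;
 *	s64 phase;
 *
 *	if (!ice_get_cgu_state(hw, dpll_idx, prev, &pin, &ref_state,
 *			       &eec_mode, &phase, &state))
 *		prev = state;
 */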
3905
3906/**
3907 * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
3908 * @hw: pointer to the hw struct
3909 * @base_idx: returns index of first recovered clock pin on device
3910 * @pin_num: returns number of recovered clock pins available on device
3911 *
3912 * Based on the hw struct, provide the caller with information about the
3913 * recovered clock pins available on the board.
3914 *
3915 * Return:
3916 * * 0 - success, information is valid
3917 * * negative - failure, information is not valid
3918 */
3919int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
3920{
3921 u8 phy_idx;
3922 int ret;
3923
3924 switch (hw->device_id) {
3925 case ICE_DEV_ID_E810C_SFP:
3926 case ICE_DEV_ID_E810C_QSFP:
3927
3928 ret = ice_get_pf_c827_idx(hw, &phy_idx);
3929 if (ret)
3930 return ret;
3931 *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
3932 *pin_num = ICE_E810_RCLK_PINS_NUM;
3933 ret = 0;
3934 break;
3935 case ICE_DEV_ID_E823L_10G_BASE_T:
3936 case ICE_DEV_ID_E823L_1GBE:
3937 case ICE_DEV_ID_E823L_BACKPLANE:
3938 case ICE_DEV_ID_E823L_QSFP:
3939 case ICE_DEV_ID_E823L_SFP:
3940 case ICE_DEV_ID_E823C_10G_BASE_T:
3941 case ICE_DEV_ID_E823C_BACKPLANE:
3942 case ICE_DEV_ID_E823C_QSFP:
3943 case ICE_DEV_ID_E823C_SFP:
3944 case ICE_DEV_ID_E823C_SGMII:
3945 *pin_num = ICE_E82X_RCLK_PINS_NUM;
3946 ret = 0;
3947 if (hw->cgu_part_number ==
3948 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
3949 *base_idx = ZL_REF1P;
3950 else if (hw->cgu_part_number ==
3951 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
3952 *base_idx = SI_REF1P;
3953 else
3954 ret = -ENODEV;
3955
3956 break;
3957 default:
3958 ret = -ENODEV;
3959 break;
3960 }
3961
3962 return ret;
3963}
3964
3965/**
3966 * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
3967 * @hw: pointer to the hw struct
3968 * @pin_id: id of a pin
3969 * @caps: capabilities to modify
3970 *
3971 * Return:
3972 * * 0 - success, state capabilities were modified
3973 * * negative - failure, capabilities were not modified
3974 */
3975int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
3976 unsigned long *caps)
3977{
3978 bool can_change = true;
3979
3980 switch (hw->device_id) {
3981 case ICE_DEV_ID_E810C_SFP:
3982 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
3983 can_change = false;
3984 break;
3985 case ICE_DEV_ID_E810C_QSFP:
3986 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
3987 can_change = false;
3988 break;
3989 case ICE_DEV_ID_E823L_10G_BASE_T:
3990 case ICE_DEV_ID_E823L_1GBE:
3991 case ICE_DEV_ID_E823L_BACKPLANE:
3992 case ICE_DEV_ID_E823L_QSFP:
3993 case ICE_DEV_ID_E823L_SFP:
3994 case ICE_DEV_ID_E823C_10G_BASE_T:
3995 case ICE_DEV_ID_E823C_BACKPLANE:
3996 case ICE_DEV_ID_E823C_QSFP:
3997 case ICE_DEV_ID_E823C_SFP:
3998 case ICE_DEV_ID_E823C_SGMII:
3999 if (hw->cgu_part_number ==
4000 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
4001 pin_id == ZL_OUT2)
4002 can_change = false;
4003 else if (hw->cgu_part_number ==
4004 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
4005 pin_id == SI_OUT1)
4006 can_change = false;
4007 break;
4008 default:
4009 return -EINVAL;
4010 }
4011 if (can_change)
4012 *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4013 else
4014 *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4015
4016 return 0;
4017}