1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4#include <linux/delay.h>
5#include "ice_common.h"
6#include "ice_ptp_hw.h"
7#include "ice_ptp_consts.h"
8#include "ice_cgu_regs.h"
9
10static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
11 DPLL_PIN_FREQUENCY_1PPS,
12 DPLL_PIN_FREQUENCY_10MHZ,
13};
14
15static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
16 DPLL_PIN_FREQUENCY_1PPS,
17};
18
19static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
20 DPLL_PIN_FREQUENCY_10MHZ,
21};
22
23static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
24 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
25 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
26 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
27 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
28 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
29 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
30 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
31 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
32 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
33 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
34 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
35 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
36 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
37};
38
39static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
40 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
41 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
42 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
43 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
44 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
45 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
46 { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
47 { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
48 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
49 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
50 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
51 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
52 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
53 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
54 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
55};
56
57static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
58 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
59 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
60 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
61 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
62 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
63 { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
64 { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
65 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
66 { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
67 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
68};
69
70static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
71 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
72 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
73 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
74 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
75 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
76 { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
77 { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
78 { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
79 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
80 { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
81 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
82};
83
84static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
85 { "NONE", SI_REF0P, 0, 0 },
86 { "NONE", SI_REF0N, 0, 0 },
87 { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
88 { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
89 { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
90 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
91 { "NONE", SI_REF2N, 0, 0 },
92 { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
93 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
94 { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
95 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
96};
97
98static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
99 { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
100 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
101 { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
102 { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
103 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
104 { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
105 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
106};
107
108static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
109 { "NONE", ZL_REF0P, 0, 0 },
110 { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
111 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
112 { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
113 { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
114 { "NONE", ZL_REF2P, 0, 0 },
115 { "NONE", ZL_REF2N, 0, 0 },
116 { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
117 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
118 { "NONE", ZL_REF3N, 0, 0 },
119 { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
120 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
121 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
122};
123
124static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
125 { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
126 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
127 { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
128 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
129 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
130 { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
131 { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
132 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
133 { "NONE", ZL_OUT5, 0, 0 },
134};
135
136/* Low level functions for interacting with and managing the device clock used
137 * for the Precision Time Protocol.
138 *
139 * The ice hardware represents the current time using three registers:
140 *
141 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
142 * +---------------+ +---------------+ +---------------+
143 * | 32 bits | | 32 bits | | 32 bits |
144 * +---------------+ +---------------+ +---------------+
145 *
146 * The registers are incremented every clock tick using a 40bit increment
147 * value defined over two registers:
148 *
149 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
150 * +---------------+ +---------------+
151 * | 8 bits | | 32 bits |
152 * +---------------+ +---------------+
153 *
154 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
155 * registers every clock source tick. Depending on the specific device
156 * configuration, the clock source frequency could be one of a number of
157 * values.
158 *
159 * For E810 devices, the increment frequency is 812.5 MHz
160 *
161 * For E822 devices the clock can be derived from different sources, and the
162 * increment has an effective frequency of one of the following:
163 * - 823.4375 MHz
164 * - 783.36 MHz
165 * - 796.875 MHz
166 * - 816 MHz
167 * - 830.078125 MHz
168 * - 783.36 MHz
169 *
170 * The hardware captures timestamps in the PHY for incoming packets, and for
171 * outgoing packets on request. To support this, the PHY maintains a timer
172 * that matches the lower 64 bits of the global source timer.
173 *
174 * In order to ensure that the PHY timers and the source timer are equivalent,
175 * shadow registers are used to prepare the desired initial values. A special
176 * sync command is issued to trigger copying from the shadow registers into
177 * the appropriate source and PHY registers simultaneously.
178 *
179 * The driver supports devices which have different PHYs with subtly different
180 * mechanisms to program and control the timers. We divide the devices into
181 * families named after the first major device, E810 and similar devices, and
182 * E822 and similar devices.
183 *
184 * - E822 based devices have additional support for fine grained Vernier
185 * calibration which requires significant setup
186 * - The layout of timestamp data in the PHY register blocks is different
187 * - The way timer synchronization commands are issued is different.
188 *
189 * To support this, very low level functions have an e810 or e822 suffix
190 * indicating what type of device they work on. Higher level abstractions for
191 * tasks that can be done on both devices do not have the suffix and will
192 * correctly look up the appropriate low level function when running.
193 *
194 * Functions which only make sense on a single device family may not have
195 * a suitable generic implementation.
196 */
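
/*
 * Illustrative sketch only (not used by the driver): given the register
 * layout described above, the nominal increment value for a clock source is
 * the tick period in nanoseconds scaled by 2^32, i.e. (1e9 << 32) / freq_hz.
 * For the 812.5 MHz E810 clock this works out to 0x13b13b13b. The helper
 * name below is hypothetical.
 */
static inline u64 ice_example_nominal_incval(u32 freq_hz)
{
	/* One second of nanoseconds, scaled by 2^32, per tick of the source */
	return div_u64(1000000000ULL << 32, freq_hz);
}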
197
198/**
199 * ice_get_ptp_src_clock_index - determine source clock index
200 * @hw: pointer to HW struct
201 *
202 * Determine the source clock index currently in use, based on device
203 * capabilities reported during initialization.
204 */
205u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
206{
207 return hw->func_caps.ts_func_info.tmr_index_assoc;
208}
209
210/**
211 * ice_ptp_read_src_incval - Read source timer increment value
212 * @hw: pointer to HW struct
213 *
214 * Read the increment value of the source timer and return it.
215 */
216static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
217{
218 u32 lo, hi;
219 u8 tmr_idx;
220
221 tmr_idx = ice_get_ptp_src_clock_index(hw);
222
223 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
224 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
225
226 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
227}
228
229/**
230 * ice_ptp_src_cmd - Prepare source timer for a timer command
231 * @hw: pointer to HW structure
232 * @cmd: Timer command
233 *
234 * Prepare the source timer for an upcoming timer sync command.
235 */
236void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
237{
238 u32 cmd_val;
239 u8 tmr_idx;
240
241 tmr_idx = ice_get_ptp_src_clock_index(hw);
242 cmd_val = tmr_idx << SEL_CPK_SRC;
243
244 switch (cmd) {
245 case ICE_PTP_INIT_TIME:
246 cmd_val |= GLTSYN_CMD_INIT_TIME;
247 break;
248 case ICE_PTP_INIT_INCVAL:
249 cmd_val |= GLTSYN_CMD_INIT_INCVAL;
250 break;
251 case ICE_PTP_ADJ_TIME:
252 cmd_val |= GLTSYN_CMD_ADJ_TIME;
253 break;
254 case ICE_PTP_ADJ_TIME_AT_TIME:
255 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
256 break;
257 case ICE_PTP_READ_TIME:
258 cmd_val |= GLTSYN_CMD_READ_TIME;
259 break;
260 case ICE_PTP_NOP:
261 break;
262 }
263
264 wr32(hw, GLTSYN_CMD, cmd_val);
265}
266
267/**
268 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
269 * @hw: pointer to HW struct
270 *
271 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
272 * write immediately. This triggers the hardware to begin executing all of the
273 * source and PHY timer commands synchronously.
274 */
275static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
276{
277 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
278 ice_flush(hw);
279}
280
281/* E822 family functions
282 *
283 * The following functions operate on the E822 family of devices.
284 */
285
286/**
287 * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
288 * @msg: the PHY message buffer to fill in
289 * @port: the port to access
290 * @offset: the register offset
291 */
292static void
293ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
294{
295 int phy_port, phy, quadtype;
296
297 phy_port = port % ICE_PORTS_PER_PHY_E82X;
298 phy = port / ICE_PORTS_PER_PHY_E82X;
299 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
300
301 if (quadtype == 0) {
302 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
303 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
304 } else {
305 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
306 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
307 }
308
309 if (phy == 0)
310 msg->dest_dev = rmn_0;
311 else if (phy == 1)
312 msg->dest_dev = rmn_1;
313 else
314 msg->dest_dev = rmn_2;
315}
316
317/**
318 * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
319 * @low_addr: the low address to check
320 * @high_addr: on return, contains the high address of the 64bit register
321 *
322 * Checks if the provided low address is one of the known 64bit PHY values
323 * represented as two 32bit registers. If it is, return the appropriate high
324 * register offset to use.
325 */
326static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
327{
328 switch (low_addr) {
329 case P_REG_PAR_PCS_TX_OFFSET_L:
330 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
331 return true;
332 case P_REG_PAR_PCS_RX_OFFSET_L:
333 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
334 return true;
335 case P_REG_PAR_TX_TIME_L:
336 *high_addr = P_REG_PAR_TX_TIME_U;
337 return true;
338 case P_REG_PAR_RX_TIME_L:
339 *high_addr = P_REG_PAR_RX_TIME_U;
340 return true;
341 case P_REG_TOTAL_TX_OFFSET_L:
342 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
343 return true;
344 case P_REG_TOTAL_RX_OFFSET_L:
345 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
346 return true;
347 case P_REG_UIX66_10G_40G_L:
348 *high_addr = P_REG_UIX66_10G_40G_U;
349 return true;
350 case P_REG_UIX66_25G_100G_L:
351 *high_addr = P_REG_UIX66_25G_100G_U;
352 return true;
353 case P_REG_TX_CAPTURE_L:
354 *high_addr = P_REG_TX_CAPTURE_U;
355 return true;
356 case P_REG_RX_CAPTURE_L:
357 *high_addr = P_REG_RX_CAPTURE_U;
358 return true;
359 case P_REG_TX_TIMER_INC_PRE_L:
360 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
361 return true;
362 case P_REG_RX_TIMER_INC_PRE_L:
363 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
364 return true;
365 default:
366 return false;
367 }
368}
369
370/**
371 * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
372 * @low_addr: the low address to check
373 * @high_addr: on return, contains the high address of the 40bit value
374 *
375 * Checks if the provided low address is one of the known 40bit PHY values
376 * split into two registers with the lower 8 bits in the low register and the
377 * upper 32 bits in the high register. If it is, return the appropriate high
378 * register offset to use.
379 */
380static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
381{
382 switch (low_addr) {
383 case P_REG_TIMETUS_L:
384 *high_addr = P_REG_TIMETUS_U;
385 return true;
386 case P_REG_PAR_RX_TUS_L:
387 *high_addr = P_REG_PAR_RX_TUS_U;
388 return true;
389 case P_REG_PAR_TX_TUS_L:
390 *high_addr = P_REG_PAR_TX_TUS_U;
391 return true;
392 case P_REG_PCS_RX_TUS_L:
393 *high_addr = P_REG_PCS_RX_TUS_U;
394 return true;
395 case P_REG_PCS_TX_TUS_L:
396 *high_addr = P_REG_PCS_TX_TUS_U;
397 return true;
398 case P_REG_DESK_PAR_RX_TUS_L:
399 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
400 return true;
401 case P_REG_DESK_PAR_TX_TUS_L:
402 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
403 return true;
404 case P_REG_DESK_PCS_RX_TUS_L:
405 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
406 return true;
407 case P_REG_DESK_PCS_TX_TUS_L:
408 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
409 return true;
410 default:
411 return false;
412 }
413}
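
/*
 * Hedged sketch of the 40b register layout described above: the lower 8 bits
 * of the value belong in the low register and the upper 32 bits in the high
 * register. The helper name is hypothetical; the same split is performed by
 * ice_write_40b_phy_reg_e82x() below using P_REG_40B_LOW_M and
 * P_REG_40B_HIGH_S.
 */
static inline void ice_example_split_40b(u64 val, u32 *low, u32 *high)
{
	*low = (u32)(val & P_REG_40B_LOW_M);
	*high = (u32)(val >> P_REG_40B_HIGH_S);
}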
414
415/**
416 * ice_read_phy_reg_e82x - Read a PHY register
417 * @hw: pointer to the HW struct
418 * @port: PHY port to read from
419 * @offset: PHY register offset to read
420 * @val: on return, the contents read from the PHY
421 *
422 * Read a PHY register for the given port over the device sideband queue.
423 */
424static int
425ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
426{
427 struct ice_sbq_msg_input msg = {0};
428 int err;
429
430 ice_fill_phy_msg_e82x(&msg, port, offset);
431 msg.opcode = ice_sbq_msg_rd;
432
433 err = ice_sbq_rw_reg(hw, &msg);
434 if (err) {
435 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
436 err);
437 return err;
438 }
439
440 *val = msg.data;
441
442 return 0;
443}
444
445/**
446 * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
447 * @hw: pointer to the HW struct
448 * @port: PHY port to read from
449 * @low_addr: offset of the lower register to read from
450 * @val: on return, the contents of the 64bit value from the PHY registers
451 *
452 * Reads the two registers associated with a 64bit value and returns it in the
453 * val pointer. The offset always specifies the lower register offset to use.
454 * The high offset is looked up. This function only operates on registers
455 * known to be two parts of a 64bit value.
456 */
457static int
458ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
459{
460 u32 low, high;
461 u16 high_addr;
462 int err;
463
464 /* Only operate on registers known to be split into two 32bit
465 * registers.
466 */
467 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
468 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
469 low_addr);
470 return -EINVAL;
471 }
472
473 err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
474 if (err) {
475 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
476 low_addr, err);
477 return err;
478 }
479
480 err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
481 if (err) {
482 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
483 high_addr, err);
484 return err;
485 }
486
487 *val = (u64)high << 32 | low;
488
489 return 0;
490}
491
492/**
493 * ice_write_phy_reg_e82x - Write a PHY register
494 * @hw: pointer to the HW struct
495 * @port: PHY port to write to
496 * @offset: PHY register offset to write
497 * @val: The value to write to the register
498 *
499 * Write a PHY register for the given port over the device sideband queue.
500 */
501static int
502ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
503{
504 struct ice_sbq_msg_input msg = {0};
505 int err;
506
507 ice_fill_phy_msg_e82x(&msg, port, offset);
508 msg.opcode = ice_sbq_msg_wr;
509 msg.data = val;
510
511 err = ice_sbq_rw_reg(hw, &msg);
512 if (err) {
513 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
514 err);
515 return err;
516 }
517
518 return 0;
519}
520
521/**
522 * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
523 * @hw: pointer to the HW struct
524 * @port: port to write to
525 * @low_addr: offset of the low register
526 * @val: 40b value to write
527 *
528 * Write the provided 40b value to the two associated registers by splitting
529 * it up into two chunks, the lower 8 bits and the upper 32 bits.
530 */
531static int
532ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
533{
534 u32 low, high;
535 u16 high_addr;
536 int err;
537
538 /* Only operate on registers known to be split into a lower 8 bit
539 * register and an upper 32 bit register.
540 */
541 if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
542 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
543 low_addr);
544 return -EINVAL;
545 }
546
547 low = (u32)(val & P_REG_40B_LOW_M);
548 high = (u32)(val >> P_REG_40B_HIGH_S);
549
550 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
551 if (err) {
552 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
553 low_addr, err);
554 return err;
555 }
556
557 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
558 if (err) {
559 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
560 high_addr, err);
561 return err;
562 }
563
564 return 0;
565}
566
567/**
568 * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
569 * @hw: pointer to the HW struct
570 * @port: PHY port to read from
571 * @low_addr: offset of the lower register to read from
572 * @val: the contents of the 64bit value to write to PHY
573 *
574 * Write the 64bit value to the two associated 32bit PHY registers. The offset
575 * is always specified as the lower register, and the high address is looked
576 * up. This function only operates on registers known to be two parts of
577 * a 64bit value.
578 */
579static int
580ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
581{
582 u32 low, high;
583 u16 high_addr;
584 int err;
585
586 /* Only operate on registers known to be split into two 32bit
587 * registers.
588 */
589 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
590 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
591 low_addr);
592 return -EINVAL;
593 }
594
595 low = lower_32_bits(val);
596 high = upper_32_bits(val);
597
598 err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
599 if (err) {
600 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
601 low_addr, err);
602 return err;
603 }
604
605 err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
606 if (err) {
607 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
608 high_addr, err);
609 return err;
610 }
611
612 return 0;
613}
614
615/**
616 * ice_fill_quad_msg_e82x - Fill message data for quad register access
617 * @msg: the PHY message buffer to fill in
618 * @quad: the quad to access
619 * @offset: the register offset
620 *
621 * Fill a message buffer for accessing a register in a quad shared between
622 * multiple PHYs.
623 */
624static int
625ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
626{
627 u32 addr;
628
629 if (quad >= ICE_MAX_QUAD)
630 return -EINVAL;
631
632 msg->dest_dev = rmn_0;
633
634 if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
635 addr = Q_0_BASE + offset;
636 else
637 addr = Q_1_BASE + offset;
638
639 msg->msg_addr_low = lower_16_bits(addr);
640 msg->msg_addr_high = upper_16_bits(addr);
641
642 return 0;
643}
644
645/**
646 * ice_read_quad_reg_e82x - Read a PHY quad register
647 * @hw: pointer to the HW struct
648 * @quad: quad to read from
649 * @offset: quad register offset to read
650 * @val: on return, the contents read from the quad
651 *
652 * Read a quad register over the device sideband queue. Quad registers are
653 * shared between multiple PHYs.
654 */
655int
656ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
657{
658 struct ice_sbq_msg_input msg = {0};
659 int err;
660
661 err = ice_fill_quad_msg_e82x(&msg, quad, offset);
662 if (err)
663 return err;
664
665 msg.opcode = ice_sbq_msg_rd;
666
667 err = ice_sbq_rw_reg(hw, &msg);
668 if (err) {
669 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
670 err);
671 return err;
672 }
673
674 *val = msg.data;
675
676 return 0;
677}
678
679/**
680 * ice_write_quad_reg_e82x - Write a PHY quad register
681 * @hw: pointer to the HW struct
682 * @quad: quad to write to
683 * @offset: quad register offset to write
684 * @val: The value to write to the register
685 *
686 * Write a quad register over the device sideband queue. Quad registers are
687 * shared between multiple PHYs.
688 */
689int
690ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
691{
692 struct ice_sbq_msg_input msg = {0};
693 int err;
694
695 err = ice_fill_quad_msg_e82x(&msg, quad, offset);
696 if (err)
697 return err;
698
699 msg.opcode = ice_sbq_msg_wr;
700 msg.data = val;
701
702 err = ice_sbq_rw_reg(hw, &msg);
703 if (err) {
704 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
705 err);
706 return err;
707 }
708
709 return 0;
710}
711
712/**
713 * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
714 * @hw: pointer to the HW struct
715 * @quad: the quad to read from
716 * @idx: the timestamp index to read
717 * @tstamp: on return, the 40bit timestamp value
718 *
719 * Read a 40bit timestamp value out of the two associated registers in the
720 * quad memory block that is shared between the internal PHYs of the E822
721 * family of devices.
722 */
723static int
724ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
725{
726 u16 lo_addr, hi_addr;
727 u32 lo, hi;
728 int err;
729
730 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
731 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
732
733 err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
734 if (err) {
735 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
736 err);
737 return err;
738 }
739
740 err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
741 if (err) {
742 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
743 err);
744 return err;
745 }
746
747 /* For E822 based internal PHYs, the timestamp is reported with the
748 * lower 8 bits in the low register, and the upper 32 bits in the high
749 * register.
750 */
751 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
752
753 return 0;
754}
755
756/**
757 * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
758 * @hw: pointer to the HW struct
759 * @quad: the quad to read from
760 * @idx: the timestamp index to reset
761 *
762 * Read the timestamp out of the quad to clear its timestamp status bit from
763 * the PHY quad block that is shared between the internal PHYs of the E822
764 * devices.
765 *
766 * Note that unlike E810, software cannot directly write to the quad memory
767 * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
768 * to determine which timestamps are valid. Reading a timestamp auto-clears
769 * the valid bit.
770 *
771 * To directly clear the contents of the timestamp block entirely, discarding
772 * all timestamp data at once, software should instead use
773 * ice_ptp_reset_ts_memory_quad_e82x().
774 *
775 * This function should only be called on an idx whose bit is set according to
776 * ice_get_phy_tx_tstamp_ready().
777 */
778static int
779ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
780{
781 u64 unused_tstamp;
782 int err;
783
784 err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
785 if (err) {
786 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
787 quad, idx, err);
788 return err;
789 }
790
791 return 0;
792}
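
/*
 * Hedged usage sketch for the flow described above: given a bitmap of ready
 * timestamp indices (as reported by ice_get_phy_tx_tstamp_ready()), read
 * each set index, which also clears its valid bit. The helper name and the
 * caller-provided bitmap are illustrative assumptions, not driver API.
 */
static inline int ice_example_drain_quad_tstamps(struct ice_hw *hw, u8 quad,
						 u64 tstamp_ready)
{
	u8 idx;

	for (idx = 0; idx < 64; idx++) {
		u64 tstamp;
		int err;

		if (!(tstamp_ready & BIT_ULL(idx)))
			continue;

		err = ice_read_phy_tstamp_e82x(hw, quad, idx, &tstamp);
		if (err)
			return err;

		/* tstamp now holds the 40bit value for this index */
	}

	return 0;
}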
793
794/**
795 * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
796 * @hw: pointer to the HW struct
797 * @quad: the quad to read from
798 *
799 * Clear all timestamps from the PHY quad block that is shared between the
800 * internal PHYs on the E822 devices.
801 */
802void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
803{
804 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
805 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
806}
807
808/**
809 * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
810 * @hw: pointer to the HW struct
811 */
812static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
813{
814 unsigned int quad;
815
816 for (quad = 0; quad < ICE_MAX_QUAD; quad++)
817 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
818}
819
820/**
821 * ice_read_cgu_reg_e82x - Read a CGU register
822 * @hw: pointer to the HW struct
823 * @addr: Register address to read
824 * @val: storage for register value read
825 *
826 * Read the contents of a register of the Clock Generation Unit. Only
827 * applicable to E822 devices.
828 */
829static int
830ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
831{
832 struct ice_sbq_msg_input cgu_msg;
833 int err;
834
835 cgu_msg.opcode = ice_sbq_msg_rd;
836 cgu_msg.dest_dev = cgu;
837 cgu_msg.msg_addr_low = addr;
838 cgu_msg.msg_addr_high = 0x0;
839
840 err = ice_sbq_rw_reg(hw, &cgu_msg);
841 if (err) {
842 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
843 addr, err);
844 return err;
845 }
846
847 *val = cgu_msg.data;
848
849 return err;
850}
851
852/**
853 * ice_write_cgu_reg_e82x - Write a CGU register
854 * @hw: pointer to the HW struct
855 * @addr: Register address to write
856 * @val: value to write into the register
857 *
858 * Write the specified value to a register of the Clock Generation Unit. Only
859 * applicable to E822 devices.
860 */
861static int
862ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
863{
864 struct ice_sbq_msg_input cgu_msg;
865 int err;
866
867 cgu_msg.opcode = ice_sbq_msg_wr;
868 cgu_msg.dest_dev = cgu;
869 cgu_msg.msg_addr_low = addr;
870 cgu_msg.msg_addr_high = 0x0;
871 cgu_msg.data = val;
872
873 err = ice_sbq_rw_reg(hw, &cgu_msg);
874 if (err) {
875 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
876 addr, err);
877 return err;
878 }
879
880 return err;
881}
882
883/**
884 * ice_clk_freq_str - Convert time_ref_freq to string
885 * @clk_freq: Clock frequency
886 *
887 * Convert the specified TIME_REF clock frequency to a string.
888 */
889static const char *ice_clk_freq_str(u8 clk_freq)
890{
891 switch ((enum ice_time_ref_freq)clk_freq) {
892 case ICE_TIME_REF_FREQ_25_000:
893 return "25 MHz";
894 case ICE_TIME_REF_FREQ_122_880:
895 return "122.88 MHz";
896 case ICE_TIME_REF_FREQ_125_000:
897 return "125 MHz";
898 case ICE_TIME_REF_FREQ_153_600:
899 return "153.6 MHz";
900 case ICE_TIME_REF_FREQ_156_250:
901 return "156.25 MHz";
902 case ICE_TIME_REF_FREQ_245_760:
903 return "245.76 MHz";
904 default:
905 return "Unknown";
906 }
907}
908
909/**
910 * ice_clk_src_str - Convert time_ref_src to string
911 * @clk_src: Clock source
912 *
913 * Convert the specified clock source to its string name.
914 */
915static const char *ice_clk_src_str(u8 clk_src)
916{
917 switch ((enum ice_clk_src)clk_src) {
918 case ICE_CLK_SRC_TCX0:
919 return "TCX0";
920 case ICE_CLK_SRC_TIME_REF:
921 return "TIME_REF";
922 default:
923 return "Unknown";
924 }
925}
926
927/**
928 * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
929 * @hw: pointer to the HW struct
930 * @clk_freq: Clock frequency to program
931 * @clk_src: Clock source to select (TIME_REF or TCX0)
932 *
933 * Configure the Clock Generation Unit with the desired clock frequency and
934 * time reference, enabling the PLL which drives the PTP hardware clock.
935 */
936static int
937ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
938 enum ice_clk_src clk_src)
939{
940 union tspll_ro_bwm_lf bwm_lf;
941 union nac_cgu_dword19 dw19;
942 union nac_cgu_dword22 dw22;
943 union nac_cgu_dword24 dw24;
944 union nac_cgu_dword9 dw9;
945 int err;
946
947 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
948 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
949 clk_freq);
950 return -EINVAL;
951 }
952
953 if (clk_src >= NUM_ICE_CLK_SRC) {
954 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
955 clk_src);
956 return -EINVAL;
957 }
958
959 if (clk_src == ICE_CLK_SRC_TCX0 &&
960 clk_freq != ICE_TIME_REF_FREQ_25_000) {
961 dev_warn(ice_hw_to_dev(hw),
962 "TCX0 only supports 25 MHz frequency\n");
963 return -EINVAL;
964 }
965
966 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
967 if (err)
968 return err;
969
970 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
971 if (err)
972 return err;
973
974 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
975 if (err)
976 return err;
977
978 /* Log the current clock configuration */
979 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
980 dw24.field.ts_pll_enable ? "enabled" : "disabled",
981 ice_clk_src_str(dw24.field.time_ref_sel),
982 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
983 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
984
985 /* Disable the PLL before changing the clock source or frequency */
986 if (dw24.field.ts_pll_enable) {
987 dw24.field.ts_pll_enable = 0;
988
989 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
990 if (err)
991 return err;
992 }
993
994 /* Set the frequency */
995 dw9.field.time_ref_freq_sel = clk_freq;
996 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
997 if (err)
998 return err;
999
1000 /* Configure the TS PLL feedback divisor */
1001 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
1002 if (err)
1003 return err;
1004
1005 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
1006 dw19.field.tspll_ndivratio = 1;
1007
1008 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
1009 if (err)
1010 return err;
1011
1012 /* Configure the TS PLL post divisor */
1013 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
1014 if (err)
1015 return err;
1016
1017 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
1018 dw22.field.time1588clk_sel_div2 = 0;
1019
1020 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
1021 if (err)
1022 return err;
1023
1024 /* Configure the TS PLL pre divisor and clock source */
1025 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
1026 if (err)
1027 return err;
1028
1029 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
1030 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
1031 dw24.field.time_ref_sel = clk_src;
1032
1033 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1034 if (err)
1035 return err;
1036
1037 /* Finally, enable the PLL */
1038 dw24.field.ts_pll_enable = 1;
1039
1040 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1041 if (err)
1042 return err;
1043
1044 /* Wait to verify if the PLL locks */
1045 usleep_range(1000, 5000);
1046
1047 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
1048 if (err)
1049 return err;
1050
1051 if (!bwm_lf.field.plllock_true_lock_cri) {
1052 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
1053 return -EBUSY;
1054 }
1055
1056 /* Log the current clock configuration */
1057 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
1058 dw24.field.ts_pll_enable ? "enabled" : "disabled",
1059 ice_clk_src_str(dw24.field.time_ref_sel),
1060 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
1061 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
1062
1063 return 0;
1064}
1065
1066/**
1067 * ice_init_cgu_e82x - Initialize CGU with settings from firmware
1068 * @hw: pointer to the HW structure
1069 *
1070 * Initialize the Clock Generation Unit of the E822 device.
1071 */
1072static int ice_init_cgu_e82x(struct ice_hw *hw)
1073{
1074 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
1075 union tspll_cntr_bist_settings cntr_bist;
1076 int err;
1077
1078 err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1079 &cntr_bist.val);
1080 if (err)
1081 return err;
1082
1083 /* Disable sticky lock detection so the reported lock error is accurate */
1084 cntr_bist.field.i_plllock_sel_0 = 0;
1085 cntr_bist.field.i_plllock_sel_1 = 0;
1086
1087 err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1088 cntr_bist.val);
1089 if (err)
1090 return err;
1091
1092 /* Configure the CGU PLL using the parameters from the function
1093 * capabilities.
1094 */
1095 err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
1096 (enum ice_clk_src)ts_info->clk_src);
1097 if (err)
1098 return err;
1099
1100 return 0;
1101}
1102
1103/**
1104 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
1105 * @hw: pointer to the HW struct
1106 *
1107 * Set the window length used for the vernier port calibration process.
1108 */
1109static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
1110{
1111 u8 port;
1112
1113 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1114 int err;
1115
1116 err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
1117 PTP_VERNIER_WL);
1118 if (err) {
1119 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
1120 port, err);
1121 return err;
1122 }
1123 }
1124
1125 return 0;
1126}
1127
1128/**
1129 * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
1130 * @hw: pointer to HW struct
1131 *
1132 * Perform PHC initialization steps specific to E822 devices.
1133 */
1134static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
1135{
1136 int err;
1137 u32 regval;
1138
1139 /* Enable reading switch and PHY registers over the sideband queue */
1140#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
1141#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
1142 regval = rd32(hw, PF_SB_REM_DEV_CTL);
1143 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
1144 PF_SB_REM_DEV_CTL_PHY0);
1145 wr32(hw, PF_SB_REM_DEV_CTL, regval);
1146
1147 /* Initialize the Clock Generation Unit */
1148 err = ice_init_cgu_e82x(hw);
1149 if (err)
1150 return err;
1151
1152 /* Set window length for all the ports */
1153 return ice_ptp_set_vernier_wl(hw);
1154}
1155
1156/**
1157 * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
1158 * @hw: pointer to the HW struct
1159 * @time: Time to initialize the PHY port clocks to
1160 *
1161 * Program the PHY port registers with a new initial time value. The port
1162 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
1163 * command. The time value is the upper 32 bits of the PHY timer, usually in
1164 * units of nominal nanoseconds.
1165 */
1166static int
1167ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
1168{
1169 u64 phy_time;
1170 u8 port;
1171 int err;
1172
1173 /* The time represents the upper 32 bits of the PHY timer, so we need
1174 * to shift to account for this when programming.
1175 */
1176 phy_time = (u64)time << 32;
1177
1178 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1179 /* Tx case */
1180 err = ice_write_64b_phy_reg_e82x(hw, port,
1181 P_REG_TX_TIMER_INC_PRE_L,
1182 phy_time);
1183 if (err)
1184 goto exit_err;
1185
1186 /* Rx case */
1187 err = ice_write_64b_phy_reg_e82x(hw, port,
1188 P_REG_RX_TIMER_INC_PRE_L,
1189 phy_time);
1190 if (err)
1191 goto exit_err;
1192 }
1193
1194 return 0;
1195
1196exit_err:
1197 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1198 port, err);
1199
1200 return err;
1201}
1202
1203/**
1204 * ice_ptp_prep_port_adj_e82x - Prepare a single port for a time adjustment
1205 * @hw: pointer to HW struct
1206 * @port: Port number to be programmed
1207 * @time: time in cycles to adjust the port Tx and Rx clocks
1208 *
1209 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1210 * registers. The atomic adjustment won't be completed until the driver issues
1211 * an ICE_PTP_ADJ_TIME command.
1212 *
1213 * Note that time is not in units of nanoseconds. It is in clock time
1214 * including the lower sub-nanosecond portion of the port timer.
1215 *
1216 * Negative adjustments are supported using 2s complement arithmetic.
1217 */
1218static int
1219ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
1220{
1221 u32 l_time, u_time;
1222 int err;
1223
1224 l_time = lower_32_bits(time);
1225 u_time = upper_32_bits(time);
1226
1227 /* Tx case */
1228 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1229 l_time);
1230 if (err)
1231 goto exit_err;
1232
1233 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1234 u_time);
1235 if (err)
1236 goto exit_err;
1237
1238 /* Rx case */
1239 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1240 l_time);
1241 if (err)
1242 goto exit_err;
1243
1244 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1245 u_time);
1246 if (err)
1247 goto exit_err;
1248
1249 return 0;
1250
1251exit_err:
1252 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1253 port, err);
1254 return err;
1255}
1256
1257/**
1258 * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
1259 * @hw: pointer to HW struct
1260 * @adj: adjustment in nanoseconds
1261 *
1262 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1263 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1264 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
1265 */
1266static int
1267ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
1268{
1269 s64 cycles;
1270 u8 port;
1271
1272 /* The port clock supports adjustment of the sub-nanosecond portion of
1273 * the clock. We shift the provided adjustment in nanoseconds to
1274 * calculate the appropriate adjustment to program into the PHY ports.
1275 */
1276 if (adj > 0)
1277 cycles = (s64)adj << 32;
1278 else
1279 cycles = -(((s64)-adj) << 32);
1280
1281 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1282 int err;
1283
1284 err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
1285 if (err)
1286 return err;
1287 }
1288
1289 return 0;
1290}
1291
1292/**
1293 * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for a new increment value
1294 * @hw: pointer to HW struct
1295 * @incval: new increment value to prepare
1296 *
1297 * Prepare each of the PHY ports for a new increment value by programming the
1298 * port's TIMETUS registers. The new increment value will be updated after
1299 * issuing an ICE_PTP_INIT_INCVAL command.
1300 */
1301static int
1302ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
1303{
1304 int err;
1305 u8 port;
1306
1307 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1308 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
1309 incval);
1310 if (err)
1311 goto exit_err;
1312 }
1313
1314 return 0;
1315
1316exit_err:
1317 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1318 port, err);
1319
1320 return err;
1321}
1322
1323/**
1324 * ice_ptp_read_port_capture - Read a port's local time capture
1325 * @hw: pointer to HW struct
1326 * @port: Port number to read
1327 * @tx_ts: on return, the Tx port time capture
1328 * @rx_ts: on return, the Rx port time capture
1329 *
1330 * Read the port's Tx and Rx local time capture values.
1331 *
1332 * Note this has no equivalent for the E810 devices.
1333 */
1334static int
1335ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1336{
1337 int err;
1338
1339 /* Tx case */
1340 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1341 if (err) {
1342 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1343 err);
1344 return err;
1345 }
1346
1347 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1348 (unsigned long long)*tx_ts);
1349
1350 /* Rx case */
1351 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1352 if (err) {
1353 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1354 err);
1355 return err;
1356 }
1357
1358 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1359 (unsigned long long)*rx_ts);
1360
1361 return 0;
1362}
1363
1364/**
1365 * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
1366 * @hw: pointer to HW struct
1367 * @port: Port to which cmd has to be sent
1368 * @cmd: Command to be sent to the port
1369 *
1370 * Prepare the requested port for an upcoming timer sync command.
1371 *
1372 * Do not use this function directly. If you want to configure exactly one
1373 * port, use ice_ptp_one_port_cmd() instead.
1374 */
1375static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
1376 enum ice_ptp_tmr_cmd cmd)
1377{
1378 u32 cmd_val, val;
1379 u8 tmr_idx;
1380 int err;
1381
1382 tmr_idx = ice_get_ptp_src_clock_index(hw);
1383 cmd_val = tmr_idx << SEL_PHY_SRC;
1384 switch (cmd) {
1385 case ICE_PTP_INIT_TIME:
1386 cmd_val |= PHY_CMD_INIT_TIME;
1387 break;
1388 case ICE_PTP_INIT_INCVAL:
1389 cmd_val |= PHY_CMD_INIT_INCVAL;
1390 break;
1391 case ICE_PTP_ADJ_TIME:
1392 cmd_val |= PHY_CMD_ADJ_TIME;
1393 break;
1394 case ICE_PTP_READ_TIME:
1395 cmd_val |= PHY_CMD_READ_TIME;
1396 break;
1397 case ICE_PTP_ADJ_TIME_AT_TIME:
1398 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1399 break;
1400 case ICE_PTP_NOP:
1401 break;
1402 }
1403
1404 /* Tx case */
1405 /* Read, modify, write */
1406 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
1407 if (err) {
1408 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1409 err);
1410 return err;
1411 }
1412
1413 /* Modify necessary bits only and perform write */
1414 val &= ~TS_CMD_MASK;
1415 val |= cmd_val;
1416
1417 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
1418 if (err) {
1419 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1420 err);
1421 return err;
1422 }
1423
1424 /* Rx case */
1425 /* Read, modify, write */
1426 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
1427 if (err) {
1428 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1429 err);
1430 return err;
1431 }
1432
1433 /* Modify necessary bits only and perform write */
1434 val &= ~TS_CMD_MASK;
1435 val |= cmd_val;
1436
1437 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
1438 if (err) {
1439 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1440 err);
1441 return err;
1442 }
1443
1444 return 0;
1445}
1446
1447/**
1448 * ice_ptp_one_port_cmd - Prepare one port for a timer command
1449 * @hw: pointer to the HW struct
1450 * @configured_port: the port to configure with configured_cmd
1451 * @configured_cmd: timer command to prepare on the configured_port
1452 *
1453 * Prepare the configured_port for the configured_cmd, and prepare all other
1454 * ports for ICE_PTP_NOP. This causes the configured_port to execute the
1455 * desired command while all other ports perform no operation.
1456 */
1457static int
1458ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
1459 enum ice_ptp_tmr_cmd configured_cmd)
1460{
1461 u8 port;
1462
1463 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1464 enum ice_ptp_tmr_cmd cmd;
1465 int err;
1466
1467 if (port == configured_port)
1468 cmd = configured_cmd;
1469 else
1470 cmd = ICE_PTP_NOP;
1471
1472 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1473 if (err)
1474 return err;
1475 }
1476
1477 return 0;
1478}
1479
1480/**
1481 * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
1482 * @hw: pointer to the HW struct
1483 * @cmd: timer command to prepare
1484 *
1485 * Prepare all ports connected to this device for an upcoming timer sync
1486 * command.
1487 */
1488static int
1489ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1490{
1491 u8 port;
1492
1493 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1494 int err;
1495
1496 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1497 if (err)
1498 return err;
1499 }
1500
1501 return 0;
1502}
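
/*
 * Hedged usage sketch (not called as-is by the driver): a complete timer
 * command is issued in three steps: stage the command in the source timer,
 * stage it in every PHY port, and then trigger one synchronous execution so
 * all timers update together. The helper name is hypothetical.
 */
static inline int ice_example_issue_tmr_cmd_e82x(struct ice_hw *hw,
						 enum ice_ptp_tmr_cmd cmd)
{
	int err;

	/* Stage the command for the source timer ... */
	ice_ptp_src_cmd(hw, cmd);

	/* ... and for each PHY port ... */
	err = ice_ptp_port_cmd_e82x(hw, cmd);
	if (err)
		return err;

	/* ... then execute everything at once */
	ice_ptp_exec_tmr_cmd(hw);

	return 0;
}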
1503
1504/* E822 Vernier calibration functions
1505 *
1506 * The following functions are used as part of the vernier calibration of
1507 * a port. This calibration increases the precision of the timestamps on the
1508 * port.
1509 */
1510
1511/**
1512 * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
1513 * @hw: pointer to HW struct
1514 * @port: the port to read from
1515 * @link_out: if non-NULL, holds link speed on success
1516 * @fec_out: if non-NULL, holds FEC algorithm on success
1517 *
1518 * Read the serdes data for the PHY port and extract the link speed and FEC
1519 * algorithm.
1520 */
1521static int
1522ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
1523 enum ice_ptp_link_spd *link_out,
1524 enum ice_ptp_fec_mode *fec_out)
1525{
1526 enum ice_ptp_link_spd link;
1527 enum ice_ptp_fec_mode fec;
1528 u32 serdes;
1529 int err;
1530
1531 err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
1532 if (err) {
1533 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1534 return err;
1535 }
1536
1537 /* Determine the FEC algorithm */
1538 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1539
1540 serdes &= P_REG_LINK_SPEED_SERDES_M;
1541
1542 /* Determine the link speed */
1543 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1544 switch (serdes) {
1545 case ICE_PTP_SERDES_25G:
1546 link = ICE_PTP_LNK_SPD_25G_RS;
1547 break;
1548 case ICE_PTP_SERDES_50G:
1549 link = ICE_PTP_LNK_SPD_50G_RS;
1550 break;
1551 case ICE_PTP_SERDES_100G:
1552 link = ICE_PTP_LNK_SPD_100G_RS;
1553 break;
1554 default:
1555 return -EIO;
1556 }
1557 } else {
1558 switch (serdes) {
1559 case ICE_PTP_SERDES_1G:
1560 link = ICE_PTP_LNK_SPD_1G;
1561 break;
1562 case ICE_PTP_SERDES_10G:
1563 link = ICE_PTP_LNK_SPD_10G;
1564 break;
1565 case ICE_PTP_SERDES_25G:
1566 link = ICE_PTP_LNK_SPD_25G;
1567 break;
1568 case ICE_PTP_SERDES_40G:
1569 link = ICE_PTP_LNK_SPD_40G;
1570 break;
1571 case ICE_PTP_SERDES_50G:
1572 link = ICE_PTP_LNK_SPD_50G;
1573 break;
1574 default:
1575 return -EIO;
1576 }
1577 }
1578
1579 if (link_out)
1580 *link_out = link;
1581 if (fec_out)
1582 *fec_out = fec;
1583
1584 return 0;
1585}
1586
1587/**
1588 * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
1589 * @hw: pointer to HW struct
1590 * @port: the port to configure the quad for
1591 */
1592static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
1593{
1594 enum ice_ptp_link_spd link_spd;
1595 int err;
1596 u32 val;
1597 u8 quad;
1598
1599 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
1600 if (err) {
1601 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1602 err);
1603 return;
1604 }
1605
1606 quad = port / ICE_PORTS_PER_QUAD;
1607
1608 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1609 if (err) {
1610 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
1611 err);
1612 return;
1613 }
1614
1615 if (link_spd >= ICE_PTP_LNK_SPD_40G)
1616 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1617 else
1618 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1619
1620 err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1621 if (err) {
1622 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1623 err);
1624 return;
1625 }
1626}
1627
1628/**
1629 * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
1630 * @hw: pointer to the HW structure
1631 * @port: the port to configure
1632 *
1633 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1634 * hardware clock time units (TUs). That is, determine the number of TUs per
1635 * serdes unit interval, and program the UIX registers with this conversion.
1636 *
1637 * This conversion is used as part of the calibration process when determining
1638 * the additional error of a timestamp vs the real time of transmission or
1639 * receipt of the packet.
1640 *
1641 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1642 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1643 *
1644 * To calculate the conversion ratio, we use the following facts:
1645 *
1646 * a) the clock frequency in Hz (cycles per second)
1647 * b) the number of TUs per cycle (the increment value of the clock)
1648 * c) 1 second per 1 billion nanoseconds
1649 * d) the duration of 66 UIs in nanoseconds
1650 *
1651 * Given these facts, we can use the following table to work out what ratios
1652 * to multiply in order to get the number of TUs per 66 UIs:
1653 *
1654 * cycles | 1 second | incval (TUs) | nanoseconds
1655 * -------+--------------+--------------+-------------
1656 * second | 1 billion ns | cycle | 66 UIs
1657 *
1658 * To perform the multiplication using integers without too much loss of
1659 * precision, we can use the following equation:
1660 *
1661 * (freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion)
1662 *
1663 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1664 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1665 *
1666 * The increment value has a maximum expected range of about 34 bits, while
1667 * the frequency value is about 29 bits. Multiplying these values shouldn't
1668 * overflow the 64 bits. However, we must then further multiply them again by
1669 * the Serdes unit interval duration. To avoid overflow here, we split the
1670 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1671 * a divide by 390,625,000. This does lose some precision, but avoids
1672 * miscalculation due to arithmetic overflow.
1673 */
1674static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
1675{
1676 u64 cur_freq, clk_incval, tu_per_sec, uix;
1677 int err;
1678
1679 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1680 clk_incval = ice_ptp_read_src_incval(hw);
1681
1682 /* Calculate TUs per second divided by 256 */
1683 tu_per_sec = (cur_freq * clk_incval) >> 8;
1684
1685#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1686#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1687
1688 /* Program the 10Gb/40Gb conversion ratio */
1689 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1690
1691 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
1692 uix);
1693 if (err) {
1694 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1695 err);
1696 return err;
1697 }
1698
1699 /* Program the 25Gb/100Gb conversion ratio */
1700 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1701
1702 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
1703 uix);
1704 if (err) {
1705 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1706 err);
1707 return err;
1708 }
1709
1710 return 0;
1711}
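
/*
 * Worked check of the scaling above, as an illustrative sketch only: the
 * divide by 1e11 (100 * 1 billion) is split into a shift by 8 and a divide
 * by 390,625,000 because 256 * 390,625,000 equals 100,000,000,000. The
 * hypothetical helper below mirrors the 10G/40G computation performed in
 * ice_phy_cfg_uix_e82x().
 */
static inline u64 ice_example_uix_10g_40g(u64 cur_freq, u64 clk_incval)
{
	/* TUs per second, pre-divided by 256 to keep the product in range */
	u64 tu_per_sec = (cur_freq * clk_incval) >> 8;

	/* 6600 UIs last 640 ns at 10G/40G; apply the remaining 1e11 / 256 */
	return div_u64(tu_per_sec * 640, 390625000);
}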
1712
1713/**
1714 * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
1715 * @hw: pointer to the HW struct
1716 * @port: port to configure
1717 *
1718 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1719 * timestamp calibration process. This depends on the link speed, as the PHY
1720 * uses different markers depending on the speed.
1721 *
1722 * 1Gb/10Gb/25Gb:
1723 * - Tx/Rx PAR/PCS markers
1724 *
1725 * 25Gb RS:
1726 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1727 *
1728 * 40Gb/50Gb:
1729 * - Tx/Rx PAR/PCS markers
1730 * - Rx Deskew PAR/PCS markers
1731 *
1732 * 50G RS and 100G RS:
1733 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1734 * - Rx Deskew PAR/PCS markers
1735 * - Tx PAR/PCS markers
1736 *
1737 * To calculate the conversion, we use the PHC clock frequency (cycles per
1738 * second), the increment value (TUs per cycle), and the related PHY clock
1739 * frequency to calculate the TUs per unit of the PHY link clock. The
1740 * following table shows how the units convert:
1741 *
1742 * cycles | TUs | second
1743 * -------+-------+--------
1744 * second | cycle | cycles
1745 *
1746 * For each conversion register, look up the appropriate frequency from the
1747 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1748 * this to the appropriate register, preparing hardware to perform timestamp
1749 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1750 * in order to calibrate for the internal PHY delays.
1751 *
1752 * Note that the increment value ranges up to ~34 bits, and the clock
1753 * frequency is ~29 bits, so multiplying them together should fit within the
1754 * 64 bit arithmetic.
1755 */
1756static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
1757{
1758 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1759 enum ice_ptp_link_spd link_spd;
1760 enum ice_ptp_fec_mode fec_mode;
1761 int err;
1762
1763 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1764 if (err)
1765 return err;
1766
1767 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1768 clk_incval = ice_ptp_read_src_incval(hw);
1769
1770 /* Calculate TUs per cycle of the PHC clock */
1771 tu_per_sec = cur_freq * clk_incval;
1772
1773 /* For each PHY conversion register, look up the appropriate link
1774 * speed frequency and determine the TUs per that clock's cycle time.
1775 * Split this into a high and low value and then program the
1776 * appropriate register. If that link speed does not use the
1777 * associated register, write zeros to clear it instead.
1778 */
1779
1780 /* P_REG_PAR_TX_TUS */
1781 if (e822_vernier[link_spd].tx_par_clk)
1782 phy_tus = div_u64(tu_per_sec,
1783 e822_vernier[link_spd].tx_par_clk);
1784 else
1785 phy_tus = 0;
1786
1787 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
1788 phy_tus);
1789 if (err)
1790 return err;
1791
1792 /* P_REG_PAR_RX_TUS */
1793 if (e822_vernier[link_spd].rx_par_clk)
1794 phy_tus = div_u64(tu_per_sec,
1795 e822_vernier[link_spd].rx_par_clk);
1796 else
1797 phy_tus = 0;
1798
1799 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
1800 phy_tus);
1801 if (err)
1802 return err;
1803
1804 /* P_REG_PCS_TX_TUS */
1805 if (e822_vernier[link_spd].tx_pcs_clk)
1806 phy_tus = div_u64(tu_per_sec,
1807 e822_vernier[link_spd].tx_pcs_clk);
1808 else
1809 phy_tus = 0;
1810
1811 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
1812 phy_tus);
1813 if (err)
1814 return err;
1815
1816 /* P_REG_PCS_RX_TUS */
1817 if (e822_vernier[link_spd].rx_pcs_clk)
1818 phy_tus = div_u64(tu_per_sec,
1819 e822_vernier[link_spd].rx_pcs_clk);
1820 else
1821 phy_tus = 0;
1822
1823 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
1824 phy_tus);
1825 if (err)
1826 return err;
1827
1828 /* P_REG_DESK_PAR_TX_TUS */
1829 if (e822_vernier[link_spd].tx_desk_rsgb_par)
1830 phy_tus = div_u64(tu_per_sec,
1831 e822_vernier[link_spd].tx_desk_rsgb_par);
1832 else
1833 phy_tus = 0;
1834
1835 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1836 phy_tus);
1837 if (err)
1838 return err;
1839
1840 /* P_REG_DESK_PAR_RX_TUS */
1841 if (e822_vernier[link_spd].rx_desk_rsgb_par)
1842 phy_tus = div_u64(tu_per_sec,
1843 e822_vernier[link_spd].rx_desk_rsgb_par);
1844 else
1845 phy_tus = 0;
1846
1847 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1848 phy_tus);
1849 if (err)
1850 return err;
1851
1852 /* P_REG_DESK_PCS_TX_TUS */
1853 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1854 phy_tus = div_u64(tu_per_sec,
1855 e822_vernier[link_spd].tx_desk_rsgb_pcs);
1856 else
1857 phy_tus = 0;
1858
1859 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1860 phy_tus);
1861 if (err)
1862 return err;
1863
1864 /* P_REG_DESK_PCS_RX_TUS */
1865 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1866 phy_tus = div_u64(tu_per_sec,
1867 e822_vernier[link_spd].rx_desk_rsgb_pcs);
1868 else
1869 phy_tus = 0;
1870
1871 return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1872 phy_tus);
1873}
1874
1875/**
 * ice_calc_fixed_tx_offset_e82x - Calculate the fixed Tx offset for a port
 * @hw: pointer to the HW struct
 * @link_spd: the link speed to calculate for
1879 *
1880 * Calculate the fixed offset due to known static latency data.
1881 */
1882static u64
1883ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1884{
1885 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1886
1887 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1888 clk_incval = ice_ptp_read_src_incval(hw);
1889
1890 /* Calculate TUs per second */
1891 tu_per_sec = cur_freq * clk_incval;
1892
1893 /* Calculate number of TUs to add for the fixed Tx latency. Since the
1894 * latency measurement is in 1/100th of a nanosecond, we need to
1895 * multiply by tu_per_sec and then divide by 1e11. This calculation
1896 * overflows 64 bit integer arithmetic, so break it up into two
1897 * divisions by 1e4 first then by 1e7.
1898 */
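	/* For example (hypothetical numbers): with tu_per_sec around 4.0e18
	 * and a tx_fixed_delay of 250 (2.5 ns expressed in 1/100 ns units),
	 * the naive product 4.0e18 * 250 = 1.0e21 would overflow a u64,
	 * whereas (4.0e18 / 1e4) * 250 = 1.0e17 does not, and the final
	 * division by 1e7 yields 1.0e10 TUs of fixed offset.
	 */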
1899 fixed_offset = div_u64(tu_per_sec, 10000);
1900 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1901 fixed_offset = div_u64(fixed_offset, 10000000);
1902
1903 return fixed_offset;
1904}
1905
1906/**
1907 * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
1908 * @hw: pointer to the HW struct
1909 * @port: the PHY port to configure
1910 *
1911 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1912 * adjust Tx timestamps by. This is calculated by combining some known static
1913 * latency along with the Vernier offset computations done by hardware.
1914 *
1915 * This function will not return successfully until the Tx offset calculations
1916 * have been completed, which requires waiting until at least one packet has
1917 * been transmitted by the device. It is safe to call this function
1918 * periodically until calibration succeeds, as it will only program the offset
1919 * once.
1920 *
1921 * To avoid overflow, when calculating the offset based on the known static
1922 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1923 * the TUs per second up front. This avoids overflow while allowing
1924 * calculation of the adjustment using integer arithmetic.
1925 *
1926 * Returns zero on success, -EBUSY if the hardware vernier offset
1927 * calibration has not completed, or another error code on failure.
1928 */
1929int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
1930{
1931 enum ice_ptp_link_spd link_spd;
1932 enum ice_ptp_fec_mode fec_mode;
1933 u64 total_offset, val;
1934 int err;
1935 u32 reg;
1936
1937 /* Nothing to do if we've already programmed the offset */
	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
1939 if (err) {
1940 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1941 port, err);
1942 return err;
1943 }
1944
1945 if (reg)
1946 return 0;
1947
	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
1949 if (err) {
1950 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1951 port, err);
1952 return err;
1953 }
1954
1955 if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1956 return -EBUSY;
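
	/* A sketch of the expected caller behavior (hypothetical caller, not
	 * part of this file):
	 *
	 *	err = ice_phy_cfg_tx_offset_e82x(hw, port);
	 *	if (err == -EBUSY)
	 *		retry later, e.g. from a delayed work item;
	 *	else if (err)
	 *		handle the failure;
	 *
	 * -EBUSY simply means hardware has not yet captured a transmitted
	 * packet, so the vernier measurement is not available yet.
	 */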
1957
1958 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1959 if (err)
1960 return err;
1961
1962 total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
1963
1964 /* Read the first Vernier offset from the PHY register and add it to
1965 * the total offset.
1966 */
1967 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1968 link_spd == ICE_PTP_LNK_SPD_10G ||
1969 link_spd == ICE_PTP_LNK_SPD_25G ||
1970 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1971 link_spd == ICE_PTP_LNK_SPD_40G ||
1972 link_spd == ICE_PTP_LNK_SPD_50G) {
1973 err = ice_read_64b_phy_reg_e82x(hw, port,
1974 P_REG_PAR_PCS_TX_OFFSET_L,
1975 &val);
1976 if (err)
1977 return err;
1978
1979 total_offset += val;
1980 }
1981
1982 /* For Tx, we only need to use the second Vernier offset for
1983 * multi-lane link speeds with RS-FEC. The lanes will always be
1984 * aligned.
1985 */
1986 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1987 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1988 err = ice_read_64b_phy_reg_e82x(hw, port,
1989 P_REG_PAR_TX_TIME_L,
1990 &val);
1991 if (err)
1992 return err;
1993
1994 total_offset += val;
1995 }
1996
1997 /* Now that the total offset has been calculated, program it to the
1998 * PHY and indicate that the Tx offset is ready. After this,
1999 * timestamps will be enabled.
2000 */
2001 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
2002 total_offset);
2003 if (err)
2004 return err;
2005
2006 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
2007 if (err)
2008 return err;
2009
2010 dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
2011 port);
2012
2013 return 0;
2014}
2015
2016/**
2017 * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
2018 * @hw: pointer to the HW struct
2019 * @port: the PHY port to adjust for
2020 * @link_spd: the current link speed of the PHY
2021 * @fec_mode: the current FEC mode of the PHY
2022 * @pmd_adj: on return, the amount to adjust the Rx total offset by
2023 *
2024 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
2025 * This varies by link speed and FEC mode. The value calculated accounts for
2026 * various delays caused when receiving a packet.
2027 */
2028static int
2029ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
2030 enum ice_ptp_link_spd link_spd,
2031 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
2032{
2033 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
2034 u8 pmd_align;
2035 u32 val;
2036 int err;
2037
2038 err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
2039 if (err) {
2040 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
2041 err);
2042 return err;
2043 }
2044
2045 pmd_align = (u8)val;
2046
2047 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2048 clk_incval = ice_ptp_read_src_incval(hw);
2049
2050 /* Calculate TUs per second */
2051 tu_per_sec = cur_freq * clk_incval;
2052
2053 /* The PMD alignment adjustment measurement depends on the link speed,
2054 * and whether FEC is enabled. For each link speed, the alignment
2055 * adjustment is calculated by dividing a value by the length of
2056 * a Time Unit in nanoseconds.
2057 *
	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
2059 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
2060 * 10G w/FEC: align * 0.1 * 32/33
2061 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
2062 * 25G w/FEC: align * 0.4 * 32/33
2063 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
2064 * 40G w/FEC: align * 0.1 * 32/33
2065 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
2066 * 50G w/FEC: align * 0.8 * 32/33
2067 *
2068 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
2069 *
2070 * To allow for calculating this value using integer arithmetic, we
2071 * instead start with the number of TUs per second, (inverse of the
2072 * length of a Time Unit in nanoseconds), multiply by a value based
2073 * on the PMD alignment register, and then divide by the right value
2074 * calculated based on the table above. To avoid integer overflow this
2075 * division is broken up into a step of dividing by 125 first.
2076 */
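	/* Worked example (hypothetical alignment value): at 25G with
	 * pmd_align == 33, the table above gives 33 * 0.4 * 32/33 = 12.8 ns
	 * of adjustment. The integer math below produces the equivalent
	 * number of TUs by scaling tu_per_sec with the multiplier and the
	 * per-speed pmd_adj_divisor instead of using floating point.
	 */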
2077 if (link_spd == ICE_PTP_LNK_SPD_1G) {
2078 if (pmd_align == 4)
2079 mult = 10;
2080 else
2081 mult = (pmd_align + 6) % 10;
2082 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
2083 link_spd == ICE_PTP_LNK_SPD_25G ||
2084 link_spd == ICE_PTP_LNK_SPD_40G ||
2085 link_spd == ICE_PTP_LNK_SPD_50G) {
2086 /* If Clause 74 FEC, always calculate PMD adjust */
2087 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
2088 mult = pmd_align;
2089 else
2090 mult = 0;
2091 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
2092 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2093 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2094 if (pmd_align < 17)
2095 mult = pmd_align + 40;
2096 else
2097 mult = pmd_align;
2098 } else {
2099 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
2100 link_spd);
2101 mult = 0;
2102 }
2103
2104 /* In some cases, there's no need to adjust for the PMD alignment */
2105 if (!mult) {
2106 *pmd_adj = 0;
2107 return 0;
2108 }
2109
2110 /* Calculate the adjustment by multiplying TUs per second by the
2111 * appropriate multiplier and divisor. To avoid overflow, we first
2112 * divide by 125, and then handle remaining divisor based on the link
2113 * speed pmd_adj_divisor value.
2114 */
2115 adj = div_u64(tu_per_sec, 125);
2116 adj *= mult;
2117 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
2118
2119 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
2120 * cycle count is necessary.
2121 */
2122 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
2123 u64 cycle_adj;
2124 u8 rx_cycle;
2125
2126 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
2127 &val);
2128 if (err) {
2129 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
2130 err);
2131 return err;
2132 }
2133
2134 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
2135 if (rx_cycle) {
2136 mult = (4 - rx_cycle) * 40;
2137
2138 cycle_adj = div_u64(tu_per_sec, 125);
2139 cycle_adj *= mult;
2140 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2141
2142 adj += cycle_adj;
2143 }
2144 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
2145 u64 cycle_adj;
2146 u8 rx_cycle;
2147
2148 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
2149 &val);
2150 if (err) {
2151 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
2152 err);
2153 return err;
2154 }
2155
2156 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
2157 if (rx_cycle) {
2158 mult = rx_cycle * 40;
2159
2160 cycle_adj = div_u64(tu_per_sec, 125);
2161 cycle_adj *= mult;
2162 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2163
2164 adj += cycle_adj;
2165 }
2166 }
2167
2168 /* Return the calculated adjustment */
2169 *pmd_adj = adj;
2170
2171 return 0;
2172}
2173
2174/**
 * ice_calc_fixed_rx_offset_e82x - Calculate the fixed Rx offset for a port
 * @hw: pointer to HW struct
 * @link_spd: the link speed to calculate for
2178 *
2179 * Determine the fixed Rx latency for a given link speed.
2180 */
2181static u64
2182ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2183{
2184 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2185
2186 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2187 clk_incval = ice_ptp_read_src_incval(hw);
2188
2189 /* Calculate TUs per second */
2190 tu_per_sec = cur_freq * clk_incval;
2191
2192 /* Calculate number of TUs to add for the fixed Rx latency. Since the
2193 * latency measurement is in 1/100th of a nanosecond, we need to
2194 * multiply by tu_per_sec and then divide by 1e11. This calculation
2195 * overflows 64 bit integer arithmetic, so break it up into two
2196 * divisions by 1e4 first then by 1e7.
2197 */
2198 fixed_offset = div_u64(tu_per_sec, 10000);
2199 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2200 fixed_offset = div_u64(fixed_offset, 10000000);
2201
2202 return fixed_offset;
2203}
2204
2205/**
2206 * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
2207 * @hw: pointer to the HW struct
2208 * @port: the PHY port to configure
2209 *
2210 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2211 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2212 * measurements taken in hardware with some data about known fixed delay as
2213 * well as adjusting for multi-lane alignment delay.
2214 *
2215 * This function will not return successfully until the Rx offset calculations
2216 * have been completed, which requires waiting until at least one packet has
2217 * been received by the device. It is safe to call this function periodically
2218 * until calibration succeeds, as it will only program the offset once.
2219 *
2220 * This function must be called only after the offset registers are valid,
2221 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2222 * has measured the offset.
2223 *
2224 * To avoid overflow, when calculating the offset based on the known static
2225 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2226 * the TUs per second up front. This avoids overflow while allowing
2227 * calculation of the adjustment using integer arithmetic.
2228 *
2229 * Returns zero on success, -EBUSY if the hardware vernier offset
2230 * calibration has not completed, or another error code on failure.
2231 */
2232int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
2233{
2234 enum ice_ptp_link_spd link_spd;
2235 enum ice_ptp_fec_mode fec_mode;
2236 u64 total_offset, pmd, val;
2237 int err;
2238 u32 reg;
2239
2240 /* Nothing to do if we've already programmed the offset */
	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
2242 if (err) {
2243 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2244 port, err);
2245 return err;
2246 }
2247
2248 if (reg)
2249 return 0;
2250
	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
2252 if (err) {
2253 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2254 port, err);
2255 return err;
2256 }
2257
2258 if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2259 return -EBUSY;
2260
2261 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
2262 if (err)
2263 return err;
2264
2265 total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
2266
2267 /* Read the first Vernier offset from the PHY register and add it to
2268 * the total offset.
2269 */
2270 err = ice_read_64b_phy_reg_e82x(hw, port,
2271 P_REG_PAR_PCS_RX_OFFSET_L,
2272 &val);
2273 if (err)
2274 return err;
2275
2276 total_offset += val;
2277
2278 /* For Rx, all multi-lane link speeds include a second Vernier
2279 * calibration, because the lanes might not be aligned.
2280 */
2281 if (link_spd == ICE_PTP_LNK_SPD_40G ||
2282 link_spd == ICE_PTP_LNK_SPD_50G ||
2283 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2284 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2285 err = ice_read_64b_phy_reg_e82x(hw, port,
2286 P_REG_PAR_RX_TIME_L,
2287 &val);
2288 if (err)
2289 return err;
2290
2291 total_offset += val;
2292 }
2293
2294 /* In addition, Rx must account for the PMD alignment */
2295 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
2296 if (err)
2297 return err;
2298
2299 /* For RS-FEC, this adjustment adds delay, but for other modes, it
2300 * subtracts delay.
2301 */
2302 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2303 total_offset += pmd;
2304 else
2305 total_offset -= pmd;
2306
2307 /* Now that the total offset has been calculated, program it to the
2308 * PHY and indicate that the Rx offset is ready. After this,
2309 * timestamps will be enabled.
2310 */
2311 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2312 total_offset);
2313 if (err)
2314 return err;
2315
2316 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
2317 if (err)
2318 return err;
2319
2320 dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
2321 port);
2322
2323 return 0;
2324}
2325
2326/**
2327 * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
2328 * @hw: pointer to the HW struct
2329 * @port: the PHY port to read
2330 * @phy_time: on return, the 64bit PHY timer value
2331 * @phc_time: on return, the lower 64bits of PHC time
2332 *
 * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
 * and PHC timer values.
2335 */
2336static int
2337ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
2338 u64 *phc_time)
2339{
2340 u64 tx_time, rx_time;
2341 u32 zo, lo;
2342 u8 tmr_idx;
2343 int err;
2344
2345 tmr_idx = ice_get_ptp_src_clock_index(hw);
2346
	/* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
2348 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2349
	/* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
2351 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
2352 if (err)
2353 return err;
2354
2355 /* Issue the sync to start the ICE_PTP_READ_TIME capture */
2356 ice_ptp_exec_tmr_cmd(hw);
2357
2358 /* Read the captured PHC time from the shadow time registers */
2359 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2360 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2361 *phc_time = (u64)lo << 32 | zo;
2362
2363 /* Read the captured PHY time from the PHY shadow registers */
2364 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2365 if (err)
2366 return err;
2367
2368 /* If the PHY Tx and Rx timers don't match, log a warning message.
2369 * Note that this should not happen in normal circumstances since the
2370 * driver always programs them together.
2371 */
2372 if (tx_time != rx_time)
2373 dev_warn(ice_hw_to_dev(hw),
2374 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2375 port, (unsigned long long)tx_time,
2376 (unsigned long long)rx_time);
2377
2378 *phy_time = tx_time;
2379
2380 return 0;
2381}
2382
2383/**
2384 * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
2385 * @hw: pointer to the HW struct
2386 * @port: the PHY port to synchronize
2387 *
2388 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
 * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
 * simultaneous read of the PHY timer and PHC timer. Then we use the
 * difference to calculate an appropriate 2s complement adjustment to add
 * to the PHY timer so that it reads the same value as the primary PHC
 * timer.
2394 */
2395static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
2396{
2397 u64 phc_time, phy_time, difference;
2398 int err;
2399
2400 if (!ice_ptp_lock(hw)) {
2401 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2402 return -EBUSY;
2403 }
2404
2405 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2406 if (err)
2407 goto err_unlock;
2408
2409 /* Calculate the amount required to add to the port time in order for
2410 * it to match the PHC time.
2411 *
2412 * Note that the port adjustment is done using 2s complement
2413 * arithmetic. This is convenient since it means that we can simply
2414 * calculate the difference between the PHC time and the port time,
2415 * and it will be interpreted correctly.
2416 */
2417 difference = phc_time - phy_time;
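
	/* For example, if the PHC read 8 and the PHY timer read 16, the
	 * unsigned subtraction wraps to 0xFFFFFFFFFFFFFFF8, which the port
	 * adjustment logic interprets as -8 and therefore pulls the PHY
	 * timer back by exactly the amount it was ahead.
	 */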
2418
2419 err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
2420 if (err)
2421 goto err_unlock;
2422
2423 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
2424 if (err)
2425 goto err_unlock;
2426
2427 /* Do not perform any action on the main timer */
2428 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2429
2430 /* Issue the sync to activate the time adjustment */
2431 ice_ptp_exec_tmr_cmd(hw);
2432
2433 /* Re-capture the timer values to flush the command registers and
2434 * verify that the time was properly adjusted.
2435 */
2436 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2437 if (err)
2438 goto err_unlock;
2439
2440 dev_info(ice_hw_to_dev(hw),
2441 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2442 port, (unsigned long long)phy_time,
2443 (unsigned long long)phc_time);
2444
2445 ice_ptp_unlock(hw);
2446
2447 return 0;
2448
2449err_unlock:
2450 ice_ptp_unlock(hw);
2451 return err;
2452}
2453
2454/**
2455 * ice_stop_phy_timer_e82x - Stop the PHY clock timer
2456 * @hw: pointer to the HW struct
2457 * @port: the PHY port to stop
2458 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2459 *
2460 * Stop the clock of a PHY port. This must be done as part of the flow to
2461 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2462 * initialized or when link speed changes.
2463 */
2464int
2465ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
2466{
2467 int err;
2468 u32 val;
2469
2470 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
2471 if (err)
2472 return err;
2473
2474 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
2475 if (err)
2476 return err;
2477
2478 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2479 if (err)
2480 return err;
2481
2482 val &= ~P_REG_PS_START_M;
2483 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2484 if (err)
2485 return err;
2486
2487 val &= ~P_REG_PS_ENA_CLK_M;
2488 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2489 if (err)
2490 return err;
2491
2492 if (soft_reset) {
2493 val |= P_REG_PS_SFT_RESET_M;
2494 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2495 if (err)
2496 return err;
2497 }
2498
2499 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2500
2501 return 0;
2502}
2503
2504/**
2505 * ice_start_phy_timer_e82x - Start the PHY clock timer
2506 * @hw: pointer to the HW struct
2507 * @port: the PHY port to start
2508 *
2509 * Start the clock of a PHY port. This must be done as part of the flow to
2510 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2511 * initialized or when link speed changes.
2512 *
2513 * Hardware will take Vernier measurements on Tx or Rx of packets.
2514 */
2515int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
2516{
2517 u32 lo, hi, val;
2518 u64 incval;
2519 u8 tmr_idx;
2520 int err;
2521
2522 tmr_idx = ice_get_ptp_src_clock_index(hw);
2523
2524 err = ice_stop_phy_timer_e82x(hw, port, false);
2525 if (err)
2526 return err;
2527
2528 ice_phy_cfg_lane_e82x(hw, port);
2529
2530 err = ice_phy_cfg_uix_e82x(hw, port);
2531 if (err)
2532 return err;
2533
2534 err = ice_phy_cfg_parpcs_e82x(hw, port);
2535 if (err)
2536 return err;
2537
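	/* Mirror the source timer's increment value into the port's
	 * TIMETUS register so the PHY timer ticks at the same rate as the
	 * PHC before the two are synchronized.
	 */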
2538 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2539 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2540 incval = (u64)hi << 32 | lo;
2541
2542 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
2543 if (err)
2544 return err;
2545
2546 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2547 if (err)
2548 return err;
2549
2550 /* Do not perform any action on the main timer */
2551 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2552
2553 ice_ptp_exec_tmr_cmd(hw);
2554
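	/* Assert the port soft reset, set the START bit, and then deassert
	 * the reset again so the port timer begins counting with the newly
	 * programmed increment value.
	 */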
2555 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2556 if (err)
2557 return err;
2558
2559 val |= P_REG_PS_SFT_RESET_M;
2560 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2561 if (err)
2562 return err;
2563
2564 val |= P_REG_PS_START_M;
2565 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2566 if (err)
2567 return err;
2568
2569 val &= ~P_REG_PS_SFT_RESET_M;
2570 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2571 if (err)
2572 return err;
2573
2574 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2575 if (err)
2576 return err;
2577
2578 ice_ptp_exec_tmr_cmd(hw);
2579
2580 val |= P_REG_PS_ENA_CLK_M;
2581 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2582 if (err)
2583 return err;
2584
2585 val |= P_REG_PS_LOAD_OFFSET_M;
2586 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2587 if (err)
2588 return err;
2589
2590 ice_ptp_exec_tmr_cmd(hw);
2591
2592 err = ice_sync_phy_timer_e82x(hw, port);
2593 if (err)
2594 return err;
2595
2596 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2597
2598 return 0;
2599}
2600
2601/**
2602 * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
2603 * @hw: pointer to the HW struct
2604 * @quad: the timestamp quad to read from
2605 * @tstamp_ready: contents of the Tx memory status register
2606 *
2607 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2608 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2609 * ready to be captured from the PHY timestamp block.
2610 */
2611static int
2612ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2613{
2614 u32 hi, lo;
2615 int err;
2616
2617 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2618 if (err) {
2619 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2620 quad, err);
2621 return err;
2622 }
2623
2624 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2625 if (err) {
2626 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2627 quad, err);
2628 return err;
2629 }
2630
2631 *tstamp_ready = (u64)hi << 32 | (u64)lo;
2632
2633 return 0;
2634}
2635
2636/* E810 functions
2637 *
2638 * The following functions operate on the E810 series devices which use
2639 * a separate external PHY.
2640 */
2641
2642/**
2643 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2644 * @hw: pointer to the HW struct
2645 * @addr: the address to read from
2646 * @val: On return, the value read from the PHY
2647 *
2648 * Read a register from the external PHY on the E810 device.
2649 */
2650static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2651{
2652 struct ice_sbq_msg_input msg = {0};
2653 int err;
2654
2655 msg.msg_addr_low = lower_16_bits(addr);
2656 msg.msg_addr_high = upper_16_bits(addr);
2657 msg.opcode = ice_sbq_msg_rd;
2658 msg.dest_dev = rmn_0;
2659
2660 err = ice_sbq_rw_reg(hw, &msg);
2661 if (err) {
2662 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2663 err);
2664 return err;
2665 }
2666
2667 *val = msg.data;
2668
2669 return 0;
2670}
2671
2672/**
2673 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2674 * @hw: pointer to the HW struct
 * @addr: the address to write to
2676 * @val: the value to write to the PHY
2677 *
2678 * Write a value to a register of the external PHY on the E810 device.
2679 */
2680static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2681{
2682 struct ice_sbq_msg_input msg = {0};
2683 int err;
2684
2685 msg.msg_addr_low = lower_16_bits(addr);
2686 msg.msg_addr_high = upper_16_bits(addr);
2687 msg.opcode = ice_sbq_msg_wr;
2688 msg.dest_dev = rmn_0;
2689 msg.data = val;
2690
2691 err = ice_sbq_rw_reg(hw, &msg);
2692 if (err) {
2693 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2694 err);
2695 return err;
2696 }
2697
2698 return 0;
2699}
2700
2701/**
 * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp through the FW
2703 * @hw: pointer to the HW struct
2704 * @idx: the timestamp index to read
2705 * @hi: 8 bit timestamp high value
2706 * @lo: 32 bit timestamp low value
2707 *
 * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of
 * the timestamp block of the external PHY on the E810 device using the low
 * latency timestamp read.
2711 */
2712static int
2713ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2714{
2715 u32 val;
2716 u8 i;
2717
2718 /* Write TS index to read to the PF register so the FW can read it */
2719 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2720 wr32(hw, PF_SB_ATQBAL, val);
2721
2722 /* Read the register repeatedly until the FW provides us the TS */
2723 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2724 val = rd32(hw, PF_SB_ATQBAL);
2725
2726 /* When the bit is cleared, the TS is ready in the register */
2727 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2728 /* High 8 bit value of the TS is on the bits 16:23 */
2729 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2730
2731 /* Read the low 32 bit value and set the TS valid bit */
2732 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2733 return 0;
2734 }
2735
2736 udelay(10);
2737 }
2738
2739 /* FW failed to provide the TS in time */
2740 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2741 return -EINVAL;
2742}
2743
2744/**
 * ice_read_phy_tstamp_sbq_e810 - Read a PHY timestamp through the sbq
2746 * @hw: pointer to the HW struct
2747 * @lport: the lport to read from
2748 * @idx: the timestamp index to read
2749 * @hi: 8 bit timestamp high value
2750 * @lo: 32 bit timestamp low value
2751 *
 * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of
 * the timestamp block of the external PHY on the E810 device using the
 * sideband queue.
2754 */
2755static int
2756ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2757 u32 *lo)
2758{
2759 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2760 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2761 u32 lo_val, hi_val;
2762 int err;
2763
2764 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2765 if (err) {
2766 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2767 err);
2768 return err;
2769 }
2770
2771 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2772 if (err) {
2773 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2774 err);
2775 return err;
2776 }
2777
2778 *lo = lo_val;
2779 *hi = (u8)hi_val;
2780
2781 return 0;
2782}
2783
2784/**
2785 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2786 * @hw: pointer to the HW struct
2787 * @lport: the lport to read from
2788 * @idx: the timestamp index to read
2789 * @tstamp: on return, the 40bit timestamp value
2790 *
2791 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2792 * on the E810 device.
2793 */
2794static int
2795ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2796{
2797 u32 lo = 0;
2798 u8 hi = 0;
2799 int err;
2800
2801 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2802 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2803 else
2804 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2805
2806 if (err)
2807 return err;
2808
2809 /* For E810 devices, the timestamp is reported with the lower 32 bits
2810 * in the low register, and the upper 8 bits in the high register.
2811 */
2812 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
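
	/* For instance, assuming TS_HIGH_S shifts the high byte above the
	 * 32 bit low word (as the comment above describes), hi = 0x1A and
	 * lo = 0x2B3C4D5E would combine into the 40 bit value
	 * 0x1A2B3C4D5E.
	 */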
2813
2814 return 0;
2815}
2816
2817/**
2818 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2819 * @hw: pointer to the HW struct
2820 * @lport: the lport to read from
2821 * @idx: the timestamp index to reset
2822 *
2823 * Read the timestamp and then forcibly overwrite its value to clear the valid
2824 * bit from the timestamp block of the external PHY on the E810 device.
2825 *
2826 * This function should only be called on an idx whose bit is set according to
2827 * ice_get_phy_tx_tstamp_ready().
2828 */
2829static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2830{
2831 u32 lo_addr, hi_addr;
2832 u64 unused_tstamp;
2833 int err;
2834
2835 err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
2836 if (err) {
2837 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
2838 lport, idx, err);
2839 return err;
2840 }
2841
2842 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2843 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2844
2845 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2846 if (err) {
2847 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
2848 lport, idx, err);
2849 return err;
2850 }
2851
2852 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2853 if (err) {
2854 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
2855 lport, idx, err);
2856 return err;
2857 }
2858
2859 return 0;
2860}
2861
2862/**
2863 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2864 * @hw: pointer to HW struct
2865 *
2866 * Enable the timesync PTP functionality for the external PHY connected to
2867 * this function.
2868 */
2869int ice_ptp_init_phy_e810(struct ice_hw *hw)
2870{
2871 u8 tmr_idx;
2872 int err;
2873
2874 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2875 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2876 GLTSYN_ENA_TSYN_ENA_M);
2877 if (err)
2878 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2879 err);
2880
2881 return err;
2882}
2883
2884/**
2885 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2886 * @hw: pointer to HW struct
2887 *
2888 * Perform E810-specific PTP hardware clock initialization steps.
2889 */
2890static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2891{
2892 /* Ensure synchronization delay is zero */
2893 wr32(hw, GLTSYN_SYNC_DLAY, 0);
2894
2895 /* Initialize the PHY */
2896 return ice_ptp_init_phy_e810(hw);
2897}
2898
2899/**
2900 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
 * @hw: pointer to HW struct
2902 * @time: Time to initialize the PHY port clock to
2903 *
 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting
 * the initial clock time. The time will not actually be programmed until the
2906 * driver issues an ICE_PTP_INIT_TIME command.
2907 *
2908 * The time value is the upper 32 bits of the PHY timer, usually in units of
2909 * nominal nanoseconds.
2910 */
2911static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2912{
2913 u8 tmr_idx;
2914 int err;
2915
2916 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2917 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2918 if (err) {
2919 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2920 err);
2921 return err;
2922 }
2923
2924 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2925 if (err) {
2926 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2927 err);
2928 return err;
2929 }
2930
2931 return 0;
2932}
2933
2934/**
2935 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2936 * @hw: pointer to HW struct
2937 * @adj: adjustment value to program
2938 *
2939 * Prepare the PHY port for an atomic adjustment by programming the PHY
2940 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2941 * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
2942 *
2943 * The adjustment value only contains the portion used for the upper 32bits of
2944 * the PHY timer, usually in units of nominal nanoseconds. Negative
2945 * adjustments are supported using 2s complement arithmetic.
2946 */
2947static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2948{
2949 u8 tmr_idx;
2950 int err;
2951
2952 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2953
2954 /* Adjustments are represented as signed 2's complement values in
2955 * nanoseconds. Sub-nanosecond adjustment is not supported.
2956 */
2957 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2958 if (err) {
2959 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2960 err);
2961 return err;
2962 }
2963
2964 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2965 if (err) {
2966 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2967 err);
2968 return err;
2969 }
2970
2971 return 0;
2972}
2973
2974/**
2975 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2976 * @hw: pointer to HW struct
2977 * @incval: The new 40bit increment value to prepare
2978 *
2979 * Prepare the PHY port for a new increment value by programming the PHY
2980 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2981 * completed by issuing an ICE_PTP_INIT_INCVAL command.
2982 */
2983static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2984{
2985 u32 high, low;
2986 u8 tmr_idx;
2987 int err;
2988
2989 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2990 low = lower_32_bits(incval);
2991 high = upper_32_bits(incval);
2992
2993 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2994 if (err) {
2995 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2996 err);
2997 return err;
2998 }
2999
3000 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
3001 if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_H, err %d\n",
3003 err);
3004 return err;
3005 }
3006
3007 return 0;
3008}
3009
3010/**
3011 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
3012 * @hw: pointer to HW struct
3013 * @cmd: Command to be sent to the port
3014 *
3015 * Prepare the external PHYs connected to this device for a timer sync
3016 * command.
3017 */
3018static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3019{
3020 u32 cmd_val, val;
3021 int err;
3022
3023 switch (cmd) {
3024 case ICE_PTP_INIT_TIME:
3025 cmd_val = GLTSYN_CMD_INIT_TIME;
3026 break;
3027 case ICE_PTP_INIT_INCVAL:
3028 cmd_val = GLTSYN_CMD_INIT_INCVAL;
3029 break;
3030 case ICE_PTP_ADJ_TIME:
3031 cmd_val = GLTSYN_CMD_ADJ_TIME;
3032 break;
3033 case ICE_PTP_READ_TIME:
3034 cmd_val = GLTSYN_CMD_READ_TIME;
3035 break;
3036 case ICE_PTP_ADJ_TIME_AT_TIME:
3037 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
3038 break;
3039 case ICE_PTP_NOP:
3040 return 0;
3041 }
3042
3043 /* Read, modify, write */
3044 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
3045 if (err) {
3046 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
3047 return err;
3048 }
3049
3050 /* Modify necessary bits only and perform write */
3051 val &= ~TS_CMD_MASK_E810;
3052 val |= cmd_val;
3053
3054 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
3055 if (err) {
3056 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
3057 return err;
3058 }
3059
3060 return 0;
3061}
3062
3063/**
3064 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
3065 * @hw: pointer to the HW struct
3066 * @port: the PHY port to read
3067 * @tstamp_ready: contents of the Tx memory status register
3068 *
 * E810 devices do not use a Tx memory status register. Instead, simply
 * indicate that all timestamps are currently ready.
3071 */
3072static int
3073ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
3074{
3075 *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
3076 return 0;
3077}
3078
3079/* E810T SMA functions
3080 *
3081 * The following functions operate specifically on E810T hardware and are used
3082 * to access the extended GPIOs available.
3083 */
3084
3085/**
 * ice_get_pca9575_handle - Find the PCA9575 GPIO controller's netlist handle
 * @hw: pointer to the hw struct
 * @pca9575_handle: GPIO controller's handle
 *
 * Find and return the GPIO controller's handle in the netlist. Once found,
 * the handle is cached in the HW structure and subsequent calls return the
 * cached value.
3093 */
3094static int
3095ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3096{
3097 struct ice_aqc_get_link_topo *cmd;
3098 struct ice_aq_desc desc;
3099 int status;
3100 u8 idx;
3101
3102 /* If handle was read previously return cached value */
3103 if (hw->io_expander_handle) {
3104 *pca9575_handle = hw->io_expander_handle;
3105 return 0;
3106 }
3107
3108 /* If handle was not detected read it from the netlist */
3109 cmd = &desc.params.get_link_topo;
3110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3111
3112 /* Set node type to GPIO controller */
3113 cmd->addr.topo_params.node_type_ctx =
3114 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3115 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3116
3117#define SW_PCA9575_SFP_TOPO_IDX 2
3118#define SW_PCA9575_QSFP_TOPO_IDX 1
3119
3120 /* Check if the SW IO expander controlling SMA exists in the netlist. */
3121 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3122 idx = SW_PCA9575_SFP_TOPO_IDX;
3123 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3124 idx = SW_PCA9575_QSFP_TOPO_IDX;
3125 else
3126 return -EOPNOTSUPP;
3127
3128 cmd->addr.topo_params.index = idx;
3129
3130 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3131 if (status)
3132 return -EOPNOTSUPP;
3133
3134 /* Verify if we found the right IO expander type */
3135 if (desc.params.get_link_topo.node_part_num !=
3136 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3137 return -EOPNOTSUPP;
3138
3139 /* If present save the handle and return it */
3140 hw->io_expander_handle =
3141 le16_to_cpu(desc.params.get_link_topo.addr.handle);
3142 *pca9575_handle = hw->io_expander_handle;
3143
3144 return 0;
3145}
3146
3147/**
 * ice_read_sma_ctrl_e810t - Read the SMA controller state
3149 * @hw: pointer to the hw struct
3150 * @data: pointer to data to be read from the GPIO controller
3151 *
3152 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3153 * PCA9575 expander, so only bits 3-7 in data are valid.
3154 */
3155int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3156{
3157 int status;
3158 u16 handle;
3159 u8 i;
3160
3161 status = ice_get_pca9575_handle(hw, &handle);
3162 if (status)
3163 return status;
3164
3165 *data = 0;
3166
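	/* Note the logical inversion below: a GPIO that reads back low is
	 * reported as a set bit in @data, which mirrors the complementary
	 * encoding used by ice_write_sma_ctrl_e810t().
	 */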
3167 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3168 bool pin;
3169
3170 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3171 &pin, NULL);
3172 if (status)
3173 break;
3174 *data |= (u8)(!pin) << i;
3175 }
3176
3177 return status;
3178}
3179
3180/**
 * ice_write_sma_ctrl_e810t - Write the SMA controller state
3182 * @hw: pointer to the hw struct
3183 * @data: data to be written to the GPIO controller
3184 *
3185 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3186 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3187 */
3188int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3189{
3190 int status;
3191 u16 handle;
3192 u8 i;
3193
3194 status = ice_get_pca9575_handle(hw, &handle);
3195 if (status)
3196 return status;
3197
3198 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3199 bool pin;
3200
3201 pin = !(data & (1 << i));
3202 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3203 pin, NULL);
3204 if (status)
3205 break;
3206 }
3207
3208 return status;
3209}
3210
3211/**
 * ice_read_pca9575_reg_e810t - Read a register of the PCA9575 GPIO controller
3213 * @hw: pointer to the hw struct
3214 * @offset: GPIO controller register offset
3215 * @data: pointer to data to be read from the GPIO controller
3216 *
3217 * Read the register from the GPIO controller
3218 */
3219int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3220{
3221 struct ice_aqc_link_topo_addr link_topo;
3222 __le16 addr;
3223 u16 handle;
3224 int err;
3225
3226 memset(&link_topo, 0, sizeof(link_topo));
3227
3228 err = ice_get_pca9575_handle(hw, &handle);
3229 if (err)
3230 return err;
3231
3232 link_topo.handle = cpu_to_le16(handle);
3233 link_topo.topo_params.node_type_ctx =
3234 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3235 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3236
3237 addr = cpu_to_le16((u16)offset);
3238
3239 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3240}
3241
3242/* Device agnostic functions
3243 *
3244 * The following functions implement shared behavior common to both E822 and
3245 * E810 devices, possibly calling a device specific implementation where
3246 * necessary.
3247 */
3248
3249/**
3250 * ice_ptp_lock - Acquire PTP global semaphore register lock
3251 * @hw: pointer to the HW struct
3252 *
3253 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
3254 * was acquired, false otherwise.
3255 *
3256 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
3257 * value. If software sees the busy bit cleared, this means that this function
3258 * acquired the lock (and the busy bit is now set). If software sees the busy
3259 * bit set, it means that another function acquired the lock.
3260 *
3261 * Software must clear the busy bit with a write to release the lock for other
3262 * functions when done.
3263 */
3264bool ice_ptp_lock(struct ice_hw *hw)
3265{
3266 u32 hw_lock;
3267 int i;
3268
3269#define MAX_TRIES 15
3270
3271 for (i = 0; i < MAX_TRIES; i++) {
3272 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
3273 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
3274 if (hw_lock) {
3275 /* Somebody is holding the lock */
3276 usleep_range(5000, 6000);
3277 continue;
3278 }
3279
3280 break;
3281 }
3282
3283 return !hw_lock;
3284}
3285
3286/**
3287 * ice_ptp_unlock - Release PTP global semaphore register lock
3288 * @hw: pointer to the HW struct
3289 *
3290 * Release the global PTP hardware semaphore lock. This is done by writing to
3291 * the PFTSYN_SEM register.
3292 */
3293void ice_ptp_unlock(struct ice_hw *hw)
3294{
3295 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
3296}
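
/* Typical usage of the semaphore pair above (illustrative sketch only):
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	err = ice_ptp_write_incval(hw, incval);
 *	ice_ptp_unlock(hw);
 *	return err;
 *
 * which is exactly the pattern implemented by
 * ice_ptp_write_incval_locked() below.
 */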
3297
3298/**
3299 * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
3300 * @hw: pointer to the HW structure
3301 *
3302 * Determine the PHY model for the device, and initialize hw->phy_model
3303 * for use by other functions.
3304 */
3305void ice_ptp_init_phy_model(struct ice_hw *hw)
3306{
3307 if (ice_is_e810(hw))
3308 hw->phy_model = ICE_PHY_E810;
3309 else
3310 hw->phy_model = ICE_PHY_E82X;
3311}
3312
3313/**
3314 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
3315 * @hw: pointer to HW struct
3316 * @cmd: the command to issue
3317 *
3318 * Prepare the source timer and PHY timers and then trigger the requested
3319 * command. This causes the shadow registers previously written in preparation
3320 * for the command to be synchronously applied to both the source and PHY
3321 * timers.
3322 */
3323static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3324{
3325 int err;
3326
3327 /* First, prepare the source timer */
3328 ice_ptp_src_cmd(hw, cmd);
3329
3330 /* Next, prepare the ports */
3331 switch (hw->phy_model) {
3332 case ICE_PHY_E810:
3333 err = ice_ptp_port_cmd_e810(hw, cmd);
3334 break;
3335 case ICE_PHY_E82X:
3336 err = ice_ptp_port_cmd_e82x(hw, cmd);
3337 break;
3338 default:
3339 err = -EOPNOTSUPP;
3340 }
3341
3342 if (err) {
3343 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3344 cmd, err);
3345 return err;
3346 }
3347
3348 /* Write the sync command register to drive both source and PHY timer
3349 * commands synchronously
3350 */
3351 ice_ptp_exec_tmr_cmd(hw);
3352
3353 return 0;
3354}
3355
3356/**
3357 * ice_ptp_init_time - Initialize device time to provided value
3358 * @hw: pointer to HW struct
3359 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3360 *
3361 * Initialize the device to the specified time provided. This requires a three
3362 * step process:
3363 *
3364 * 1) write the new init time to the source timer shadow registers
3365 * 2) write the new init time to the PHY timer shadow registers
3366 * 3) issue an init_time timer command to synchronously switch both the source
3367 * and port timers to the new init time value at the next clock cycle.
3368 */
3369int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3370{
3371 u8 tmr_idx;
3372 int err;
3373
3374 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3375
3376 /* Source timers */
3377 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3378 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3379 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3380
3381 /* PHY timers */
3382 /* Fill Rx and Tx ports and send msg to PHY */
3383 switch (hw->phy_model) {
3384 case ICE_PHY_E810:
3385 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3386 break;
3387 case ICE_PHY_E82X:
3388 err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
3389 break;
3390 default:
3391 err = -EOPNOTSUPP;
3392 }
3393
3394 if (err)
3395 return err;
3396
3397 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
3398}
3399
3400/**
3401 * ice_ptp_write_incval - Program PHC with new increment value
3402 * @hw: pointer to HW struct
3403 * @incval: Source timer increment value per clock cycle
3404 *
3405 * Program the PHC with a new increment value. This requires a three-step
3406 * process:
3407 *
3408 * 1) Write the increment value to the source timer shadow registers
3409 * 2) Write the increment value to the PHY timer shadow registers
3410 * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
3411 * the source and port timers to the new increment value at the next clock
3412 * cycle.
3413 */
3414int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3415{
3416 u8 tmr_idx;
3417 int err;
3418
3419 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3420
3421 /* Shadow Adjust */
3422 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3423 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3424
3425 switch (hw->phy_model) {
3426 case ICE_PHY_E810:
3427 err = ice_ptp_prep_phy_incval_e810(hw, incval);
3428 break;
3429 case ICE_PHY_E82X:
3430 err = ice_ptp_prep_phy_incval_e82x(hw, incval);
3431 break;
3432 default:
3433 err = -EOPNOTSUPP;
3434 }
3435
3436 if (err)
3437 return err;
3438
3439 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
3440}
3441
3442/**
3443 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3444 * @hw: pointer to HW struct
3445 * @incval: Source timer increment value per clock cycle
3446 *
3447 * Program a new PHC incval while holding the PTP semaphore.
3448 */
3449int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3450{
3451 int err;
3452
3453 if (!ice_ptp_lock(hw))
3454 return -EBUSY;
3455
3456 err = ice_ptp_write_incval(hw, incval);
3457
3458 ice_ptp_unlock(hw);
3459
3460 return err;
3461}
3462
3463/**
3464 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3465 * @hw: pointer to HW struct
3466 * @adj: Adjustment in nanoseconds
3467 *
3468 * Perform an atomic adjustment of the PHC time by the specified number of
3469 * nanoseconds. This requires a three-step process:
3470 *
3471 * 1) Write the adjustment to the source timer shadow registers
3472 * 2) Write the adjustment to the PHY timer shadow registers
3473 * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
3474 * adjustment to both the source and port timers at the next clock cycle.
3475 */
3476int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3477{
3478 u8 tmr_idx;
3479 int err;
3480
3481 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3482
3483 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3484 * For an ICE_PTP_ADJ_TIME command, this set of registers represents
3485 * the value to add to the clock time. It supports subtraction by
3486 * interpreting the value as a 2's complement integer.
3487 */
3488 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3489 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3490
3491 switch (hw->phy_model) {
3492 case ICE_PHY_E810:
3493 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3494 break;
3495 case ICE_PHY_E82X:
3496 err = ice_ptp_prep_phy_adj_e82x(hw, adj);
3497 break;
3498 default:
3499 err = -EOPNOTSUPP;
3500 }
3501
3502 if (err)
3503 return err;
3504
3505 return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
3506}
3507
3508/**
 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3510 * @hw: pointer to the HW struct
3511 * @block: the block to read from
3512 * @idx: the timestamp index to read
3513 * @tstamp: on return, the 40bit timestamp value
3514 *
3515 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3516 * the block is the quad to read from. For E810 devices, the block is the
3517 * logical port to read from.
3518 */
3519int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3520{
3521 switch (hw->phy_model) {
3522 case ICE_PHY_E810:
3523 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3524 case ICE_PHY_E82X:
3525 return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
3526 default:
3527 return -EOPNOTSUPP;
3528 }
3529}
3530
3531/**
3532 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3533 * @hw: pointer to the HW struct
3534 * @block: the block to read from
3535 * @idx: the timestamp index to reset
3536 *
3537 * Clear a timestamp from the timestamp block, discarding its value without
3538 * returning it. This resets the memory status bit for the timestamp index
3539 * allowing it to be reused for another timestamp in the future.
3540 *
3541 * For E822 devices, the block number is the PHY quad to clear from. For E810
3542 * devices, the block number is the logical port to clear from.
3543 *
3544 * This function must only be called on a timestamp index whose valid bit is
3545 * set according to ice_get_phy_tx_tstamp_ready().
3546 */
3547int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3548{
3549 switch (hw->phy_model) {
3550 case ICE_PHY_E810:
3551 return ice_clear_phy_tstamp_e810(hw, block, idx);
3552 case ICE_PHY_E82X:
3553 return ice_clear_phy_tstamp_e82x(hw, block, idx);
3554 default:
3555 return -EOPNOTSUPP;
3556 }
3557}
3558
3559/**
3560 * ice_get_pf_c827_idx - find and return the C827 index for the current pf
3561 * @hw: pointer to the hw struct
3562 * @idx: index of the found C827 PHY
3563 * Return:
3564 * * 0 - success
3565 * * negative - failure
3566 */
3567static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
3568{
3569 struct ice_aqc_get_link_topo cmd;
3570 u8 node_part_number;
3571 u16 node_handle;
3572 int status;
3573 u8 ctx;
3574
3575 if (hw->mac_type != ICE_MAC_E810)
3576 return -ENODEV;
3577
3578 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
3579 *idx = C827_0;
3580 return 0;
3581 }
3582
3583 memset(&cmd, 0, sizeof(cmd));
3584
3585 ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
3586 ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
3587 cmd.addr.topo_params.node_type_ctx = ctx;
3588
3589 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
3590 &node_handle);
3591 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
3592 return -ENOENT;
3593
3594 if (node_handle == E810C_QSFP_C827_0_HANDLE)
3595 *idx = C827_0;
3596 else if (node_handle == E810C_QSFP_C827_1_HANDLE)
3597 *idx = C827_1;
3598 else
3599 return -EIO;
3600
3601 return 0;
3602}
3603
3604/**
3605 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
3606 * @hw: pointer to the HW struct
3607 */
3608void ice_ptp_reset_ts_memory(struct ice_hw *hw)
3609{
3610 switch (hw->phy_model) {
3611 case ICE_PHY_E82X:
3612 ice_ptp_reset_ts_memory_e82x(hw);
3613 break;
3614 case ICE_PHY_E810:
3615 default:
3616 return;
3617 }
3618}
3619
3620/**
3621 * ice_ptp_init_phc - Initialize PTP hardware clock
3622 * @hw: pointer to the HW struct
3623 *
3624 * Perform the steps required to initialize the PTP hardware clock.
3625 */
3626int ice_ptp_init_phc(struct ice_hw *hw)
3627{
3628 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3629
3630 /* Enable source clocks */
3631 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3632
3633 /* Clear event err indications for auxiliary pins */
3634 (void)rd32(hw, GLTSYN_STAT(src_idx));
3635
3636 switch (hw->phy_model) {
3637 case ICE_PHY_E810:
3638 return ice_ptp_init_phc_e810(hw);
3639 case ICE_PHY_E82X:
3640 return ice_ptp_init_phc_e82x(hw);
3641 default:
3642 return -EOPNOTSUPP;
3643 }
3644}
3645
3646/**
3647 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
3648 * @hw: pointer to the HW struct
3649 * @block: the timestamp block to check
3650 * @tstamp_ready: storage for the PHY Tx memory status information
3651 *
3652 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
3653 * which indicates which timestamps in the block may be captured. A set bit
3654 * means the timestamp can be read. An unset bit means the timestamp is not
3655 * ready and software should avoid reading the register.
3656 */
3657int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
3658{
3659 switch (hw->phy_model) {
3660 case ICE_PHY_E810:
3661 return ice_get_phy_tx_tstamp_ready_e810(hw, block,
3662 tstamp_ready);
3663 case ICE_PHY_E82X:
3664 return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
3665 tstamp_ready);
3667 default:
3668 return -EOPNOTSUPP;
3669 }
3670}
3671
3672/**
3673 * ice_cgu_get_pin_desc_e823 - get pin description array
3674 * @hw: pointer to the hw struct
3675 * @input: if request is done against input or output pin
3676 * @size: number of inputs/outputs
3677 *
 3678 * Return: pointer to the pin description array associated with the given hw.
3679 */
3680static const struct ice_cgu_pin_desc *
3681ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
3682{
3683 static const struct ice_cgu_pin_desc *t;
3684
3685 if (hw->cgu_part_number ==
3686 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
3687 if (input) {
3688 t = ice_e823_zl_cgu_inputs;
3689 *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
3690 } else {
3691 t = ice_e823_zl_cgu_outputs;
3692 *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
3693 }
3694 } else if (hw->cgu_part_number ==
3695 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
3696 if (input) {
3697 t = ice_e823_si_cgu_inputs;
3698 *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
3699 } else {
3700 t = ice_e823_si_cgu_outputs;
3701 *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
3702 }
3703 } else {
3704 t = NULL;
3705 *size = 0;
3706 }
3707
3708 return t;
3709}
3710
3711/**
3712 * ice_cgu_get_pin_desc - get pin description array
3713 * @hw: pointer to the hw struct
3714 * @input: if request is done against input or output pins
3715 * @size: size of array returned by function
3716 *
 3717 * Return: pointer to the pin description array associated with the given hw.
3718 */
3719static const struct ice_cgu_pin_desc *
3720ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
3721{
3722 const struct ice_cgu_pin_desc *t = NULL;
3723
3724 switch (hw->device_id) {
3725 case ICE_DEV_ID_E810C_SFP:
3726 if (input) {
3727 t = ice_e810t_sfp_cgu_inputs;
3728 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
3729 } else {
3730 t = ice_e810t_sfp_cgu_outputs;
3731 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
3732 }
3733 break;
3734 case ICE_DEV_ID_E810C_QSFP:
3735 if (input) {
3736 t = ice_e810t_qsfp_cgu_inputs;
3737 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
3738 } else {
3739 t = ice_e810t_qsfp_cgu_outputs;
3740 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
3741 }
3742 break;
3743 case ICE_DEV_ID_E823L_10G_BASE_T:
3744 case ICE_DEV_ID_E823L_1GBE:
3745 case ICE_DEV_ID_E823L_BACKPLANE:
3746 case ICE_DEV_ID_E823L_QSFP:
3747 case ICE_DEV_ID_E823L_SFP:
3748 case ICE_DEV_ID_E823C_10G_BASE_T:
3749 case ICE_DEV_ID_E823C_BACKPLANE:
3750 case ICE_DEV_ID_E823C_QSFP:
3751 case ICE_DEV_ID_E823C_SFP:
3752 case ICE_DEV_ID_E823C_SGMII:
3753 t = ice_cgu_get_pin_desc_e823(hw, input, size);
3754 break;
3755 default:
3756 break;
3757 }
3758
3759 return t;
3760}
3761
3762/**
3763 * ice_cgu_get_pin_type - get pin's type
3764 * @hw: pointer to the hw struct
3765 * @pin: pin index
3766 * @input: if request is done against input or output pin
3767 *
3768 * Return: type of a pin.
3769 */
3770enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
3771{
3772 const struct ice_cgu_pin_desc *t;
3773 int t_size;
3774
3775 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3776
3777 if (!t)
3778 return 0;
3779
3780 if (pin >= t_size)
3781 return 0;
3782
3783 return t[pin].type;
3784}
3785
3786/**
3787 * ice_cgu_get_pin_freq_supp - get pin's supported frequency
3788 * @hw: pointer to the hw struct
3789 * @pin: pin index
3790 * @input: if request is done against input or output pin
3791 * @num: output number of supported frequencies
3792 *
3793 * Get frequency supported number and array of supported frequencies.
3794 *
3795 * Return: array of supported frequencies for given pin.
3796 */
3797struct dpll_pin_frequency *
3798ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
3799{
3800 const struct ice_cgu_pin_desc *t;
3801 int t_size;
3802
3803 *num = 0;
3804 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3805 if (!t)
3806 return NULL;
3807 if (pin >= t_size)
3808 return NULL;
3809 *num = t[pin].freq_supp_num;
3810
3811 return t[pin].freq_supp;
3812}
3813
3814/**
3815 * ice_cgu_get_pin_name - get pin's name
3816 * @hw: pointer to the hw struct
3817 * @pin: pin index
3818 * @input: if request is done against input or output pin
3819 *
3820 * Return:
3821 * * null terminated char array with name
3822 * * NULL in case of failure
3823 */
3824const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
3825{
3826 const struct ice_cgu_pin_desc *t;
3827 int t_size;
3828
3829 t = ice_cgu_get_pin_desc(hw, input, &t_size);
3830
3831 if (!t)
3832 return NULL;
3833
3834 if (pin >= t_size)
3835 return NULL;
3836
3837 return t[pin].name;
3838}
3839
3840/**
3841 * ice_get_cgu_state - get the state of the DPLL
3842 * @hw: pointer to the hw struct
3843 * @dpll_idx: Index of internal DPLL unit
3844 * @last_dpll_state: last known state of DPLL
3845 * @pin: pointer to a buffer for returning currently active pin
3846 * @ref_state: reference clock state
3847 * @eec_mode: eec mode of the DPLL
3848 * @phase_offset: pointer to a buffer for returning phase offset
3849 * @dpll_state: state of the DPLL (output)
3850 *
 3851 * This function reads the state of the DPLL (dpll_idx). The non-NULL
 3852 * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to
 3853 * retrieve the currently active pin, state, mode and phase offset, respectively.
3854 *
 3855 * Return: 0 on success or a negative error code; the DPLL state is returned via dpll_state
3856 */
3857int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
3858 enum dpll_lock_status last_dpll_state, u8 *pin,
3859 u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
3860 enum dpll_lock_status *dpll_state)
3861{
3862 u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
3863 s64 hw_phase_offset;
3864 int status;
3865
3866 status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
3867 &hw_dpll_state, &hw_config,
3868 &hw_phase_offset, &hw_eec_mode);
3869 if (status)
3870 return status;
3871
3872 if (pin)
3873 /* current ref pin in dpll_state_refsel_status_X register */
3874 *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
3875 if (phase_offset)
3876 *phase_offset = hw_phase_offset;
3877 if (ref_state)
3878 *ref_state = hw_ref_state;
3879 if (eec_mode)
3880 *eec_mode = hw_eec_mode;
3881 if (!dpll_state)
3882 return 0;
3883
 3884 /* According to the ZL DPLL documentation, once the state reaches
 3885 * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with the
 3886 * ITU-T G.781 Recommendation. We cannot report HOLDOVER because the HO
 3887 * memory is cleared while switching to another reference.
 3888 * Only when the previous state was either "LOCKED without HO_ACQ" or
 3889 * "HOLDOVER" do we actually fall back to FREERUN.
 3890 */
3891 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
3892 if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
3893 *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
3894 else
3895 *dpll_state = DPLL_LOCK_STATUS_LOCKED;
3896 } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
3897 last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
3898 *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
3899 } else {
3900 *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
3901 }
3902
3903 return 0;
3904}
3905
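/*
 * Illustrative summary of the mapping implemented by ice_get_cgu_state()
 * above (not an additional hardware contract). LOCK and HO_READY denote the
 * ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK and _STATE_HO_READY bits of
 * hw_dpll_state:
 *
 *	LOCK  HO_READY  last_dpll_state            reported dpll_state
 *	----  --------  -------------------------  -------------------
 *	 1       1      any                        LOCKED_HO_ACQ
 *	 1       0      any                        LOCKED
 *	 0      any     LOCKED_HO_ACQ or HOLDOVER  HOLDOVER
 *	 0      any     anything else              UNLOCKED
 */
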
3906/**
3907 * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
3908 * @hw: pointer to the hw struct
3909 * @base_idx: returns index of first recovered clock pin on device
3910 * @pin_num: returns number of recovered clock pins available on device
3911 *
 3912 * Based on the hw, provide the caller with info about the recovered clock
 3913 * pins available on the board.
3914 *
3915 * Return:
3916 * * 0 - success, information is valid
3917 * * negative - failure, information is not valid
3918 */
3919int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
3920{
3921 u8 phy_idx;
3922 int ret;
3923
3924 switch (hw->device_id) {
3925 case ICE_DEV_ID_E810C_SFP:
3926 case ICE_DEV_ID_E810C_QSFP:
3927
3928 ret = ice_get_pf_c827_idx(hw, &phy_idx);
3929 if (ret)
3930 return ret;
3931 *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
3932 *pin_num = ICE_E810_RCLK_PINS_NUM;
3933 ret = 0;
3934 break;
3935 case ICE_DEV_ID_E823L_10G_BASE_T:
3936 case ICE_DEV_ID_E823L_1GBE:
3937 case ICE_DEV_ID_E823L_BACKPLANE:
3938 case ICE_DEV_ID_E823L_QSFP:
3939 case ICE_DEV_ID_E823L_SFP:
3940 case ICE_DEV_ID_E823C_10G_BASE_T:
3941 case ICE_DEV_ID_E823C_BACKPLANE:
3942 case ICE_DEV_ID_E823C_QSFP:
3943 case ICE_DEV_ID_E823C_SFP:
3944 case ICE_DEV_ID_E823C_SGMII:
3945 *pin_num = ICE_E82X_RCLK_PINS_NUM;
3946 ret = 0;
3947 if (hw->cgu_part_number ==
3948 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
3949 *base_idx = ZL_REF1P;
3950 else if (hw->cgu_part_number ==
3951 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
3952 *base_idx = SI_REF1P;
3953 else
3954 ret = -ENODEV;
3955
3956 break;
3957 default:
3958 ret = -ENODEV;
3959 break;
3960 }
3961
3962 return ret;
3963}
3964
3965/**
3966 * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
3967 * @hw: pointer to the hw struct
3968 * @pin_id: id of a pin
3969 * @caps: capabilities to modify
3970 *
3971 * Return:
3972 * * 0 - success, state capabilities were modified
3973 * * negative - failure, capabilities were not modified
3974 */
3975int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
3976 unsigned long *caps)
3977{
3978 bool can_change = true;
3979
3980 switch (hw->device_id) {
3981 case ICE_DEV_ID_E810C_SFP:
3982 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
3983 can_change = false;
3984 break;
3985 case ICE_DEV_ID_E810C_QSFP:
3986 if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
3987 can_change = false;
3988 break;
3989 case ICE_DEV_ID_E823L_10G_BASE_T:
3990 case ICE_DEV_ID_E823L_1GBE:
3991 case ICE_DEV_ID_E823L_BACKPLANE:
3992 case ICE_DEV_ID_E823L_QSFP:
3993 case ICE_DEV_ID_E823L_SFP:
3994 case ICE_DEV_ID_E823C_10G_BASE_T:
3995 case ICE_DEV_ID_E823C_BACKPLANE:
3996 case ICE_DEV_ID_E823C_QSFP:
3997 case ICE_DEV_ID_E823C_SFP:
3998 case ICE_DEV_ID_E823C_SGMII:
3999 if (hw->cgu_part_number ==
4000 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
4001 pin_id == ZL_OUT2)
4002 can_change = false;
4003 else if (hw->cgu_part_number ==
4004 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
4005 pin_id == SI_OUT1)
4006 can_change = false;
4007 break;
4008 default:
4009 return -EINVAL;
4010 }
4011 if (can_change)
4012 *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4013 else
4014 *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4015
4016 return 0;
4017}
9
10/* Low level functions for interacting with and managing the device clock used
11 * for the Precision Time Protocol.
12 *
13 * The ice hardware represents the current time using three registers:
14 *
15 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
16 * +---------------+ +---------------+ +---------------+
17 * | 32 bits | | 32 bits | | 32 bits |
18 * +---------------+ +---------------+ +---------------+
19 *
20 * The registers are incremented every clock tick using a 40bit increment
21 * value defined over two registers:
22 *
23 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
24 * +---------------+ +---------------+
 25 * | 8 bits | | 32 bits |
26 * +---------------+ +---------------+
27 *
 28 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
29 * registers every clock source tick. Depending on the specific device
30 * configuration, the clock source frequency could be one of a number of
31 * values.
32 *
33 * For E810 devices, the increment frequency is 812.5 MHz
34 *
35 * For E822 devices the clock can be derived from different sources, and the
36 * increment has an effective frequency of one of the following:
37 * - 823.4375 MHz
38 * - 783.36 MHz
39 * - 796.875 MHz
40 * - 816 MHz
41 * - 830.078125 MHz
42 * - 783.36 MHz
43 *
44 * The hardware captures timestamps in the PHY for incoming packets, and for
45 * outgoing packets on request. To support this, the PHY maintains a timer
46 * that matches the lower 64 bits of the global source timer.
47 *
48 * In order to ensure that the PHY timers and the source timer are equivalent,
49 * shadow registers are used to prepare the desired initial values. A special
50 * sync command is issued to trigger copying from the shadow registers into
51 * the appropriate source and PHY registers simultaneously.
52 *
53 * The driver supports devices which have different PHYs with subtly different
54 * mechanisms to program and control the timers. We divide the devices into
55 * families named after the first major device, E810 and similar devices, and
56 * E822 and similar devices.
57 *
58 * - E822 based devices have additional support for fine grained Vernier
59 * calibration which requires significant setup
60 * - The layout of timestamp data in the PHY register blocks is different
61 * - The way timer synchronization commands are issued is different.
62 *
63 * To support this, very low level functions have an e810 or e822 suffix
64 * indicating what type of device they work on. Higher level abstractions for
65 * tasks that can be done on both devices do not have the suffix and will
66 * correctly look up the appropriate low level function when running.
67 *
68 * Functions which only make sense on a single device family may not have
 69 * a suitable generic implementation.
70 */
71
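/*
 * Illustrative example, not used by the code below: the lower 32 bits of the
 * 40bit increment line up with GLTSYN_TIME_R, i.e. they carry a fractional
 * nanosecond. With the E810 increment frequency of 812.5 MHz, each tick must
 * advance the timer by 1e9 / 812.5e6 ~= 1.2308 ns, which corresponds to a
 * nominal increment value of roughly 1.2308 * 2^32 ~= 0x13B13B13B.
 */
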
72/**
73 * ice_get_ptp_src_clock_index - determine source clock index
74 * @hw: pointer to HW struct
75 *
76 * Determine the source clock index currently in use, based on device
77 * capabilities reported during initialization.
78 */
79u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
80{
81 return hw->func_caps.ts_func_info.tmr_index_assoc;
82}
83
84/**
85 * ice_ptp_read_src_incval - Read source timer increment value
86 * @hw: pointer to HW struct
87 *
88 * Read the increment value of the source timer and return it.
89 */
90static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
91{
92 u32 lo, hi;
93 u8 tmr_idx;
94
95 tmr_idx = ice_get_ptp_src_clock_index(hw);
96
97 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
98 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
99
100 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
101}
102
103/**
104 * ice_ptp_src_cmd - Prepare source timer for a timer command
105 * @hw: pointer to HW structure
106 * @cmd: Timer command
107 *
108 * Prepare the source timer for an upcoming timer sync command.
109 */
110static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
111{
112 u32 cmd_val;
113 u8 tmr_idx;
114
115 tmr_idx = ice_get_ptp_src_clock_index(hw);
116 cmd_val = tmr_idx << SEL_CPK_SRC;
117
118 switch (cmd) {
119 case INIT_TIME:
120 cmd_val |= GLTSYN_CMD_INIT_TIME;
121 break;
122 case INIT_INCVAL:
123 cmd_val |= GLTSYN_CMD_INIT_INCVAL;
124 break;
125 case ADJ_TIME:
126 cmd_val |= GLTSYN_CMD_ADJ_TIME;
127 break;
128 case ADJ_TIME_AT_TIME:
129 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
130 break;
131 case READ_TIME:
132 cmd_val |= GLTSYN_CMD_READ_TIME;
133 break;
134 }
135
136 wr32(hw, GLTSYN_CMD, cmd_val);
137}
138
139/**
140 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
141 * @hw: pointer to HW struct
142 *
143 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
144 * write immediately. This triggers the hardware to begin executing all of the
145 * source and PHY timer commands synchronously.
146 */
147static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
148{
149 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
150 ice_flush(hw);
151}
152
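/*
 * Typical usage pattern (a sketch, not an additional API): timer operations
 * are performed in two phases. The shadow registers and command bits are
 * prepared first, e.g.
 *
 *	ice_ptp_src_cmd(hw, INIT_TIME);
 *	(plus the matching per-PHY preparation for the same command)
 *
 * and only then does ice_ptp_exec_tmr_cmd(hw) trigger the hardware to apply
 * all of the prepared source and PHY values simultaneously.
 */
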
153/* E822 family functions
154 *
155 * The following functions operate on the E822 family of devices.
156 */
157
158/**
159 * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
160 * @msg: the PHY message buffer to fill in
161 * @port: the port to access
162 * @offset: the register offset
163 */
164static void
165ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
166{
167 int phy_port, phy, quadtype;
168
169 phy_port = port % ICE_PORTS_PER_PHY;
170 phy = port / ICE_PORTS_PER_PHY;
171 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
172
173 if (quadtype == 0) {
174 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
175 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
176 } else {
177 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
178 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
179 }
180
181 if (phy == 0)
182 msg->dest_dev = rmn_0;
183 else if (phy == 1)
184 msg->dest_dev = rmn_1;
185 else
186 msg->dest_dev = rmn_2;
187}
188
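/*
 * Worked example (assuming the usual E822 layout of 8 ports per PHY, 4 ports
 * per quad and 2 quad types): for port 5, phy_port = 5 % 8 = 5, phy = 0 and
 * quadtype = (5 / 4) % 2 = 1, so the address is built from P_4_BASE via the
 * P_Q1_L/P_Q1_H macros and dest_dev is rmn_0.
 */
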
189/**
190 * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
191 * @low_addr: the low address to check
192 * @high_addr: on return, contains the high address of the 64bit register
193 *
194 * Checks if the provided low address is one of the known 64bit PHY values
195 * represented as two 32bit registers. If it is, return the appropriate high
196 * register offset to use.
197 */
198static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
199{
200 switch (low_addr) {
201 case P_REG_PAR_PCS_TX_OFFSET_L:
202 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
203 return true;
204 case P_REG_PAR_PCS_RX_OFFSET_L:
205 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
206 return true;
207 case P_REG_PAR_TX_TIME_L:
208 *high_addr = P_REG_PAR_TX_TIME_U;
209 return true;
210 case P_REG_PAR_RX_TIME_L:
211 *high_addr = P_REG_PAR_RX_TIME_U;
212 return true;
213 case P_REG_TOTAL_TX_OFFSET_L:
214 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
215 return true;
216 case P_REG_TOTAL_RX_OFFSET_L:
217 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
218 return true;
219 case P_REG_UIX66_10G_40G_L:
220 *high_addr = P_REG_UIX66_10G_40G_U;
221 return true;
222 case P_REG_UIX66_25G_100G_L:
223 *high_addr = P_REG_UIX66_25G_100G_U;
224 return true;
225 case P_REG_TX_CAPTURE_L:
226 *high_addr = P_REG_TX_CAPTURE_U;
227 return true;
228 case P_REG_RX_CAPTURE_L:
229 *high_addr = P_REG_RX_CAPTURE_U;
230 return true;
231 case P_REG_TX_TIMER_INC_PRE_L:
232 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
233 return true;
234 case P_REG_RX_TIMER_INC_PRE_L:
235 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
236 return true;
237 default:
238 return false;
239 }
240}
241
242/**
243 * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
244 * @low_addr: the low address to check
245 * @high_addr: on return, contains the high address of the 40bit value
246 *
247 * Checks if the provided low address is one of the known 40bit PHY values
248 * split into two registers with the lower 8 bits in the low register and the
249 * upper 32 bits in the high register. If it is, return the appropriate high
250 * register offset to use.
251 */
252static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
253{
254 switch (low_addr) {
255 case P_REG_TIMETUS_L:
256 *high_addr = P_REG_TIMETUS_U;
257 return true;
258 case P_REG_PAR_RX_TUS_L:
259 *high_addr = P_REG_PAR_RX_TUS_U;
260 return true;
261 case P_REG_PAR_TX_TUS_L:
262 *high_addr = P_REG_PAR_TX_TUS_U;
263 return true;
264 case P_REG_PCS_RX_TUS_L:
265 *high_addr = P_REG_PCS_RX_TUS_U;
266 return true;
267 case P_REG_PCS_TX_TUS_L:
268 *high_addr = P_REG_PCS_TX_TUS_U;
269 return true;
270 case P_REG_DESK_PAR_RX_TUS_L:
271 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
272 return true;
273 case P_REG_DESK_PAR_TX_TUS_L:
274 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
275 return true;
276 case P_REG_DESK_PCS_RX_TUS_L:
277 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
278 return true;
279 case P_REG_DESK_PCS_TX_TUS_L:
280 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
281 return true;
282 default:
283 return false;
284 }
285}
286
287/**
288 * ice_read_phy_reg_e822 - Read a PHY register
289 * @hw: pointer to the HW struct
290 * @port: PHY port to read from
291 * @offset: PHY register offset to read
292 * @val: on return, the contents read from the PHY
293 *
294 * Read a PHY register for the given port over the device sideband queue.
295 */
296int
297ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
298{
299 struct ice_sbq_msg_input msg = {0};
300 int err;
301
302 ice_fill_phy_msg_e822(&msg, port, offset);
303 msg.opcode = ice_sbq_msg_rd;
304
305 err = ice_sbq_rw_reg(hw, &msg);
306 if (err) {
307 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
308 err);
309 return err;
310 }
311
312 *val = msg.data;
313
314 return 0;
315}
316
317/**
318 * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
319 * @hw: pointer to the HW struct
320 * @port: PHY port to read from
321 * @low_addr: offset of the lower register to read from
322 * @val: on return, the contents of the 64bit value from the PHY registers
323 *
324 * Reads the two registers associated with a 64bit value and returns it in the
325 * val pointer. The offset always specifies the lower register offset to use.
326 * The high offset is looked up. This function only operates on registers
327 * known to be two parts of a 64bit value.
328 */
329static int
330ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
331{
332 u32 low, high;
333 u16 high_addr;
334 int err;
335
336 /* Only operate on registers known to be split into two 32bit
337 * registers.
338 */
339 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
340 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
341 low_addr);
342 return -EINVAL;
343 }
344
345 err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
346 if (err) {
 347 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
348 low_addr, err);
349 return err;
350 }
351
352 err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
353 if (err) {
 354 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
355 high_addr, err);
356 return err;
357 }
358
359 *val = (u64)high << 32 | low;
360
361 return 0;
362}
363
364/**
365 * ice_write_phy_reg_e822 - Write a PHY register
366 * @hw: pointer to the HW struct
367 * @port: PHY port to write to
368 * @offset: PHY register offset to write
369 * @val: The value to write to the register
370 *
371 * Write a PHY register for the given port over the device sideband queue.
372 */
373int
374ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
375{
376 struct ice_sbq_msg_input msg = {0};
377 int err;
378
379 ice_fill_phy_msg_e822(&msg, port, offset);
380 msg.opcode = ice_sbq_msg_wr;
381 msg.data = val;
382
383 err = ice_sbq_rw_reg(hw, &msg);
384 if (err) {
385 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
386 err);
387 return err;
388 }
389
390 return 0;
391}
392
393/**
394 * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
395 * @hw: pointer to the HW struct
396 * @port: port to write to
397 * @low_addr: offset of the low register
398 * @val: 40b value to write
399 *
400 * Write the provided 40b value to the two associated registers by splitting
401 * it up into two chunks, the lower 8 bits and the upper 32 bits.
402 */
403static int
404ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
405{
406 u32 low, high;
407 u16 high_addr;
408 int err;
409
410 /* Only operate on registers known to be split into a lower 8 bit
411 * register and an upper 32 bit register.
412 */
413 if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
414 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
415 low_addr);
416 return -EINVAL;
417 }
418
419 low = (u32)(val & P_REG_40B_LOW_M);
420 high = (u32)(val >> P_REG_40B_HIGH_S);
421
422 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
423 if (err) {
 424 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
425 low_addr, err);
426 return err;
427 }
428
429 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
430 if (err) {
 431 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
432 high_addr, err);
433 return err;
434 }
435
436 return 0;
437}
438
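/*
 * Worked example (assuming P_REG_40B_LOW_M covers the low 8 bits and
 * P_REG_40B_HIGH_S is 8): writing the 40bit value 0x13B13B13B programs
 * low = 0x3B into the 8bit register and high = 0x013B13B1 into the 32bit
 * register.
 */
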
439/**
440 * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
441 * @hw: pointer to the HW struct
442 * @port: PHY port to read from
443 * @low_addr: offset of the lower register to read from
444 * @val: the contents of the 64bit value to write to PHY
445 *
446 * Write the 64bit value to the two associated 32bit PHY registers. The offset
447 * is always specified as the lower register, and the high address is looked
448 * up. This function only operates on registers known to be two parts of
449 * a 64bit value.
450 */
451static int
452ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
453{
454 u32 low, high;
455 u16 high_addr;
456 int err;
457
458 /* Only operate on registers known to be split into two 32bit
459 * registers.
460 */
461 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
462 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
463 low_addr);
464 return -EINVAL;
465 }
466
467 low = lower_32_bits(val);
468 high = upper_32_bits(val);
469
470 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
471 if (err) {
 472 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
473 low_addr, err);
474 return err;
475 }
476
477 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
478 if (err) {
 479 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
480 high_addr, err);
481 return err;
482 }
483
484 return 0;
485}
486
487/**
488 * ice_fill_quad_msg_e822 - Fill message data for quad register access
489 * @msg: the PHY message buffer to fill in
490 * @quad: the quad to access
491 * @offset: the register offset
492 *
493 * Fill a message buffer for accessing a register in a quad shared between
494 * multiple PHYs.
495 */
496static void
497ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
498{
499 u32 addr;
500
501 msg->dest_dev = rmn_0;
502
503 if ((quad % ICE_NUM_QUAD_TYPE) == 0)
504 addr = Q_0_BASE + offset;
505 else
506 addr = Q_1_BASE + offset;
507
508 msg->msg_addr_low = lower_16_bits(addr);
509 msg->msg_addr_high = upper_16_bits(addr);
510}
511
512/**
513 * ice_read_quad_reg_e822 - Read a PHY quad register
514 * @hw: pointer to the HW struct
515 * @quad: quad to read from
516 * @offset: quad register offset to read
517 * @val: on return, the contents read from the quad
518 *
519 * Read a quad register over the device sideband queue. Quad registers are
520 * shared between multiple PHYs.
521 */
522int
523ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
524{
525 struct ice_sbq_msg_input msg = {0};
526 int err;
527
528 if (quad >= ICE_MAX_QUAD)
529 return -EINVAL;
530
531 ice_fill_quad_msg_e822(&msg, quad, offset);
532 msg.opcode = ice_sbq_msg_rd;
533
534 err = ice_sbq_rw_reg(hw, &msg);
535 if (err) {
536 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
537 err);
538 return err;
539 }
540
541 *val = msg.data;
542
543 return 0;
544}
545
546/**
547 * ice_write_quad_reg_e822 - Write a PHY quad register
548 * @hw: pointer to the HW struct
549 * @quad: quad to write to
550 * @offset: quad register offset to write
551 * @val: The value to write to the register
552 *
553 * Write a quad register over the device sideband queue. Quad registers are
554 * shared between multiple PHYs.
555 */
556int
557ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
558{
559 struct ice_sbq_msg_input msg = {0};
560 int err;
561
562 if (quad >= ICE_MAX_QUAD)
563 return -EINVAL;
564
565 ice_fill_quad_msg_e822(&msg, quad, offset);
566 msg.opcode = ice_sbq_msg_wr;
567 msg.data = val;
568
569 err = ice_sbq_rw_reg(hw, &msg);
570 if (err) {
571 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
572 err);
573 return err;
574 }
575
576 return 0;
577}
578
579/**
580 * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
581 * @hw: pointer to the HW struct
582 * @quad: the quad to read from
583 * @idx: the timestamp index to read
584 * @tstamp: on return, the 40bit timestamp value
585 *
586 * Read a 40bit timestamp value out of the two associated registers in the
587 * quad memory block that is shared between the internal PHYs of the E822
588 * family of devices.
589 */
590static int
591ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
592{
593 u16 lo_addr, hi_addr;
594 u32 lo, hi;
595 int err;
596
597 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
598 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
599
600 err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
601 if (err) {
602 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
603 err);
604 return err;
605 }
606
607 err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
608 if (err) {
609 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
610 err);
611 return err;
612 }
613
614 /* For E822 based internal PHYs, the timestamp is reported with the
615 * lower 8 bits in the low register, and the upper 32 bits in the high
616 * register.
617 */
618 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
619
620 return 0;
621}
622
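/*
 * Worked example (assuming TS_PHY_LOW_M covers the low 8 bits and
 * TS_PHY_HIGH_S is 8): if the high register reads 0x12345678 and the low
 * register reads 0xAB, the reconstructed 40bit timestamp is 0x12345678AB,
 * i.e. 0x12345678 in the upper 32 bits (the nanosecond portion) and 0xAB in
 * the low byte (sub-nanosecond resolution).
 */
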
623/**
624 * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
625 * @hw: pointer to the HW struct
626 * @quad: the quad to read from
627 * @idx: the timestamp index to reset
628 *
629 * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
630 * shared between the internal PHYs on the E822 devices.
631 */
632static int
633ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
634{
635 u16 lo_addr, hi_addr;
636 int err;
637
638 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
639 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
640
641 err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
642 if (err) {
643 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
644 err);
645 return err;
646 }
647
648 err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
649 if (err) {
650 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
651 err);
652 return err;
653 }
654
655 return 0;
656}
657
658/**
659 * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
660 * @hw: pointer to the HW struct
661 * @quad: the quad to read from
662 *
663 * Clear all timestamps from the PHY quad block that is shared between the
664 * internal PHYs on the E822 devices.
665 */
666void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
667{
668 ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
669 ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
670}
671
672/**
673 * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
674 * @hw: pointer to the HW struct
675 */
676static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
677{
678 unsigned int quad;
679
680 for (quad = 0; quad < ICE_MAX_QUAD; quad++)
681 ice_ptp_reset_ts_memory_quad_e822(hw, quad);
682}
683
684/**
685 * ice_read_cgu_reg_e822 - Read a CGU register
686 * @hw: pointer to the HW struct
687 * @addr: Register address to read
688 * @val: storage for register value read
689 *
690 * Read the contents of a register of the Clock Generation Unit. Only
691 * applicable to E822 devices.
692 */
693static int
694ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
695{
696 struct ice_sbq_msg_input cgu_msg;
697 int err;
698
699 cgu_msg.opcode = ice_sbq_msg_rd;
700 cgu_msg.dest_dev = cgu;
701 cgu_msg.msg_addr_low = addr;
702 cgu_msg.msg_addr_high = 0x0;
703
704 err = ice_sbq_rw_reg(hw, &cgu_msg);
705 if (err) {
706 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
707 addr, err);
708 return err;
709 }
710
711 *val = cgu_msg.data;
712
713 return err;
714}
715
716/**
717 * ice_write_cgu_reg_e822 - Write a CGU register
718 * @hw: pointer to the HW struct
719 * @addr: Register address to write
720 * @val: value to write into the register
721 *
722 * Write the specified value to a register of the Clock Generation Unit. Only
723 * applicable to E822 devices.
724 */
725static int
726ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
727{
728 struct ice_sbq_msg_input cgu_msg;
729 int err;
730
731 cgu_msg.opcode = ice_sbq_msg_wr;
732 cgu_msg.dest_dev = cgu;
733 cgu_msg.msg_addr_low = addr;
734 cgu_msg.msg_addr_high = 0x0;
735 cgu_msg.data = val;
736
737 err = ice_sbq_rw_reg(hw, &cgu_msg);
738 if (err) {
739 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
740 addr, err);
741 return err;
742 }
743
744 return err;
745}
746
747/**
748 * ice_clk_freq_str - Convert time_ref_freq to string
749 * @clk_freq: Clock frequency
750 *
751 * Convert the specified TIME_REF clock frequency to a string.
752 */
753static const char *ice_clk_freq_str(u8 clk_freq)
754{
755 switch ((enum ice_time_ref_freq)clk_freq) {
756 case ICE_TIME_REF_FREQ_25_000:
757 return "25 MHz";
758 case ICE_TIME_REF_FREQ_122_880:
759 return "122.88 MHz";
760 case ICE_TIME_REF_FREQ_125_000:
761 return "125 MHz";
762 case ICE_TIME_REF_FREQ_153_600:
763 return "153.6 MHz";
764 case ICE_TIME_REF_FREQ_156_250:
765 return "156.25 MHz";
766 case ICE_TIME_REF_FREQ_245_760:
767 return "245.76 MHz";
768 default:
769 return "Unknown";
770 }
771}
772
773/**
774 * ice_clk_src_str - Convert time_ref_src to string
775 * @clk_src: Clock source
776 *
777 * Convert the specified clock source to its string name.
778 */
779static const char *ice_clk_src_str(u8 clk_src)
780{
781 switch ((enum ice_clk_src)clk_src) {
782 case ICE_CLK_SRC_TCX0:
783 return "TCX0";
784 case ICE_CLK_SRC_TIME_REF:
785 return "TIME_REF";
786 default:
787 return "Unknown";
788 }
789}
790
791/**
792 * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
793 * @hw: pointer to the HW struct
794 * @clk_freq: Clock frequency to program
795 * @clk_src: Clock source to select (TIME_REF, or TCX0)
796 *
797 * Configure the Clock Generation Unit with the desired clock frequency and
798 * time reference, enabling the PLL which drives the PTP hardware clock.
799 */
800static int
801ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
802 enum ice_clk_src clk_src)
803{
804 union tspll_ro_bwm_lf bwm_lf;
805 union nac_cgu_dword19 dw19;
806 union nac_cgu_dword22 dw22;
807 union nac_cgu_dword24 dw24;
808 union nac_cgu_dword9 dw9;
809 int err;
810
811 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
812 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
813 clk_freq);
814 return -EINVAL;
815 }
816
817 if (clk_src >= NUM_ICE_CLK_SRC) {
818 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
819 clk_src);
820 return -EINVAL;
821 }
822
823 if (clk_src == ICE_CLK_SRC_TCX0 &&
824 clk_freq != ICE_TIME_REF_FREQ_25_000) {
825 dev_warn(ice_hw_to_dev(hw),
826 "TCX0 only supports 25 MHz frequency\n");
827 return -EINVAL;
828 }
829
830 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
831 if (err)
832 return err;
833
834 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
835 if (err)
836 return err;
837
838 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
839 if (err)
840 return err;
841
842 /* Log the current clock configuration */
843 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
844 dw24.field.ts_pll_enable ? "enabled" : "disabled",
845 ice_clk_src_str(dw24.field.time_ref_sel),
846 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
847 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
848
849 /* Disable the PLL before changing the clock source or frequency */
850 if (dw24.field.ts_pll_enable) {
851 dw24.field.ts_pll_enable = 0;
852
853 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
854 if (err)
855 return err;
856 }
857
858 /* Set the frequency */
859 dw9.field.time_ref_freq_sel = clk_freq;
860 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
861 if (err)
862 return err;
863
864 /* Configure the TS PLL feedback divisor */
865 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
866 if (err)
867 return err;
868
869 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
870 dw19.field.tspll_ndivratio = 1;
871
872 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
873 if (err)
874 return err;
875
876 /* Configure the TS PLL post divisor */
877 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
878 if (err)
879 return err;
880
881 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
882 dw22.field.time1588clk_sel_div2 = 0;
883
884 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
885 if (err)
886 return err;
887
888 /* Configure the TS PLL pre divisor and clock source */
889 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
890 if (err)
891 return err;
892
893 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
894 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
895 dw24.field.time_ref_sel = clk_src;
896
897 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
898 if (err)
899 return err;
900
901 /* Finally, enable the PLL */
902 dw24.field.ts_pll_enable = 1;
903
904 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
905 if (err)
906 return err;
907
908 /* Wait to verify if the PLL locks */
909 usleep_range(1000, 5000);
910
911 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
912 if (err)
913 return err;
914
915 if (!bwm_lf.field.plllock_true_lock_cri) {
916 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
917 return -EBUSY;
918 }
919
920 /* Log the current clock configuration */
921 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
922 dw24.field.ts_pll_enable ? "enabled" : "disabled",
923 ice_clk_src_str(dw24.field.time_ref_sel),
924 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
925 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
926
927 return 0;
928}
929
930/**
931 * ice_init_cgu_e822 - Initialize CGU with settings from firmware
932 * @hw: pointer to the HW structure
933 *
934 * Initialize the Clock Generation Unit of the E822 device.
935 */
936static int ice_init_cgu_e822(struct ice_hw *hw)
937{
938 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
939 union tspll_cntr_bist_settings cntr_bist;
940 int err;
941
942 err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
943 &cntr_bist.val);
944 if (err)
945 return err;
946
947 /* Disable sticky lock detection so lock err reported is accurate */
948 cntr_bist.field.i_plllock_sel_0 = 0;
949 cntr_bist.field.i_plllock_sel_1 = 0;
950
951 err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
952 cntr_bist.val);
953 if (err)
954 return err;
955
956 /* Configure the CGU PLL using the parameters from the function
957 * capabilities.
958 */
959 err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
960 (enum ice_clk_src)ts_info->clk_src);
961 if (err)
962 return err;
963
964 return 0;
965}
966
967/**
968 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
969 * @hw: pointer to the HW struct
970 *
971 * Set the window length used for the vernier port calibration process.
972 */
973static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
974{
975 u8 port;
976
977 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
978 int err;
979
980 err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
981 PTP_VERNIER_WL);
982 if (err) {
983 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
984 port, err);
985 return err;
986 }
987 }
988
989 return 0;
990}
991
992/**
993 * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
994 * @hw: pointer to HW struct
995 *
996 * Perform PHC initialization steps specific to E822 devices.
997 */
998static int ice_ptp_init_phc_e822(struct ice_hw *hw)
999{
1000 int err;
1001 u32 regval;
1002
1003 /* Enable reading switch and PHY registers over the sideband queue */
1004#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
1005#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
1006 regval = rd32(hw, PF_SB_REM_DEV_CTL);
1007 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
1008 PF_SB_REM_DEV_CTL_PHY0);
1009 wr32(hw, PF_SB_REM_DEV_CTL, regval);
1010
1011 /* Initialize the Clock Generation Unit */
1012 err = ice_init_cgu_e822(hw);
1013 if (err)
1014 return err;
1015
1016 /* Set window length for all the ports */
1017 return ice_ptp_set_vernier_wl(hw);
1018}
1019
1020/**
1021 * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
1022 * @hw: pointer to the HW struct
1023 * @time: Time to initialize the PHY port clocks to
1024 *
1025 * Program the PHY port registers with a new initial time value. The port
1026 * clock will be initialized once the driver issues an INIT_TIME sync
1027 * command. The time value is the upper 32 bits of the PHY timer, usually in
1028 * units of nominal nanoseconds.
1029 */
1030static int
1031ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1032{
1033 u64 phy_time;
1034 u8 port;
1035 int err;
1036
1037 /* The time represents the upper 32 bits of the PHY timer, so we need
1038 * to shift to account for this when programming.
1039 */
1040 phy_time = (u64)time << 32;
1041
1042 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1043 /* Tx case */
1044 err = ice_write_64b_phy_reg_e822(hw, port,
1045 P_REG_TX_TIMER_INC_PRE_L,
1046 phy_time);
1047 if (err)
1048 goto exit_err;
1049
1050 /* Rx case */
1051 err = ice_write_64b_phy_reg_e822(hw, port,
1052 P_REG_RX_TIMER_INC_PRE_L,
1053 phy_time);
1054 if (err)
1055 goto exit_err;
1056 }
1057
1058 return 0;
1059
1060exit_err:
1061 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1062 port, err);
1063
1064 return err;
1065}
1066
1067/**
1068 * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1069 * @hw: pointer to HW struct
1070 * @port: Port number to be programmed
1071 * @time: time in cycles to adjust the port Tx and Rx clocks
1072 *
1073 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1074 * registers. The atomic adjustment won't be completed until the driver issues
1075 * an ADJ_TIME command.
1076 *
1077 * Note that time is not in units of nanoseconds. It is in clock time
1078 * including the lower sub-nanosecond portion of the port timer.
1079 *
1080 * Negative adjustments are supported using 2s complement arithmetic.
1081 */
1082int
1083ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1084{
1085 u32 l_time, u_time;
1086 int err;
1087
1088 l_time = lower_32_bits(time);
1089 u_time = upper_32_bits(time);
1090
1091 /* Tx case */
1092 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1093 l_time);
1094 if (err)
1095 goto exit_err;
1096
1097 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1098 u_time);
1099 if (err)
1100 goto exit_err;
1101
1102 /* Rx case */
1103 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1104 l_time);
1105 if (err)
1106 goto exit_err;
1107
1108 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1109 u_time);
1110 if (err)
1111 goto exit_err;
1112
1113 return 0;
1114
1115exit_err:
1116 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1117 port, err);
1118 return err;
1119}
1120
1121/**
1122 * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1123 * @hw: pointer to HW struct
1124 * @adj: adjustment in nanoseconds
1125 *
1126 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1127 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1128 * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1129 */
1130static int
1131ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1132{
1133 s64 cycles;
1134 u8 port;
1135
1136 /* The port clock supports adjustment of the sub-nanosecond portion of
1137 * the clock. We shift the provided adjustment in nanoseconds to
1138 * calculate the appropriate adjustment to program into the PHY ports.
1139 */
1140 if (adj > 0)
1141 cycles = (s64)adj << 32;
1142 else
1143 cycles = -(((s64)-adj) << 32);
1144
1145 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1146 int err;
1147
1148 err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1149 if (err)
1150 return err;
1151 }
1152
1153 return 0;
1154}
1155
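/*
 * Illustrative example: because the port timers carry a 32 bit
 * sub-nanosecond fraction, a requested adjustment of +2 ns is programmed as
 * 2 << 32 = 0x200000000 cycles, and a -2 ns adjustment as the two's
 * complement of that value.
 */
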
1156/**
 1157 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for a new increment value
1158 * @hw: pointer to HW struct
1159 * @incval: new increment value to prepare
1160 *
1161 * Prepare each of the PHY ports for a new increment value by programming the
1162 * port's TIMETUS registers. The new increment value will be updated after
1163 * issuing an INIT_INCVAL command.
1164 */
1165static int
1166ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1167{
1168 int err;
1169 u8 port;
1170
1171 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1172 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1173 incval);
1174 if (err)
1175 goto exit_err;
1176 }
1177
1178 return 0;
1179
1180exit_err:
1181 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1182 port, err);
1183
1184 return err;
1185}
1186
1187/**
1188 * ice_ptp_read_port_capture - Read a port's local time capture
1189 * @hw: pointer to HW struct
1190 * @port: Port number to read
1191 * @tx_ts: on return, the Tx port time capture
1192 * @rx_ts: on return, the Rx port time capture
1193 *
1194 * Read the port's Tx and Rx local time capture values.
1195 *
1196 * Note this has no equivalent for the E810 devices.
1197 */
1198static int
1199ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1200{
1201 int err;
1202
1203 /* Tx case */
1204 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1205 if (err) {
1206 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1207 err);
1208 return err;
1209 }
1210
1211 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1212 (unsigned long long)*tx_ts);
1213
1214 /* Rx case */
1215 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1216 if (err) {
1217 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1218 err);
1219 return err;
1220 }
1221
1222 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1223 (unsigned long long)*rx_ts);
1224
1225 return 0;
1226}
1227
1228/**
1229 * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
1230 * @hw: pointer to HW struct
1231 * @port: Port to which cmd has to be sent
1232 * @cmd: Command to be sent to the port
1233 *
1234 * Prepare the requested port for an upcoming timer sync command.
1235 *
1236 * Note there is no equivalent of this operation on E810, as that device
1237 * always handles all external PHYs internally.
1238 */
1239static int
1240ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1241{
1242 u32 cmd_val, val;
1243 u8 tmr_idx;
1244 int err;
1245
1246 tmr_idx = ice_get_ptp_src_clock_index(hw);
1247 cmd_val = tmr_idx << SEL_PHY_SRC;
1248 switch (cmd) {
1249 case INIT_TIME:
1250 cmd_val |= PHY_CMD_INIT_TIME;
1251 break;
1252 case INIT_INCVAL:
1253 cmd_val |= PHY_CMD_INIT_INCVAL;
1254 break;
1255 case ADJ_TIME:
1256 cmd_val |= PHY_CMD_ADJ_TIME;
1257 break;
1258 case READ_TIME:
1259 cmd_val |= PHY_CMD_READ_TIME;
1260 break;
1261 case ADJ_TIME_AT_TIME:
1262 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1263 break;
1264 }
1265
1266 /* Tx case */
1267 /* Read, modify, write */
1268 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1269 if (err) {
1270 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1271 err);
1272 return err;
1273 }
1274
1275 /* Modify necessary bits only and perform write */
1276 val &= ~TS_CMD_MASK;
1277 val |= cmd_val;
1278
1279 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1280 if (err) {
1281 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1282 err);
1283 return err;
1284 }
1285
1286 /* Rx case */
1287 /* Read, modify, write */
1288 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1289 if (err) {
1290 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1291 err);
1292 return err;
1293 }
1294
1295 /* Modify necessary bits only and perform write */
1296 val &= ~TS_CMD_MASK;
1297 val |= cmd_val;
1298
1299 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1300 if (err) {
1301 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1302 err);
1303 return err;
1304 }
1305
1306 return 0;
1307}
1308
1309/**
1310 * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1311 * @hw: pointer to the HW struct
1312 * @cmd: timer command to prepare
1313 *
1314 * Prepare all ports connected to this device for an upcoming timer sync
1315 * command.
1316 */
1317static int
1318ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1319{
1320 u8 port;
1321
1322 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1323 int err;
1324
1325 err = ice_ptp_one_port_cmd(hw, port, cmd);
1326 if (err)
1327 return err;
1328 }
1329
1330 return 0;
1331}
1332
1333/* E822 Vernier calibration functions
1334 *
1335 * The following functions are used as part of the vernier calibration of
1336 * a port. This calibration increases the precision of the timestamps on the
1337 * port.
1338 */
1339
1340/**
1341 * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1342 * @hw: pointer to HW struct
1343 * @port: the port to read from
1344 * @link_out: if non-NULL, holds link speed on success
1345 * @fec_out: if non-NULL, holds FEC algorithm on success
1346 *
1347 * Read the serdes data for the PHY port and extract the link speed and FEC
1348 * algorithm.
1349 */
1350static int
1351ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1352 enum ice_ptp_link_spd *link_out,
1353 enum ice_ptp_fec_mode *fec_out)
1354{
1355 enum ice_ptp_link_spd link;
1356 enum ice_ptp_fec_mode fec;
1357 u32 serdes;
1358 int err;
1359
1360 err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1361 if (err) {
1362 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1363 return err;
1364 }
1365
1366 /* Determine the FEC algorithm */
1367 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1368
1369 serdes &= P_REG_LINK_SPEED_SERDES_M;
1370
1371 /* Determine the link speed */
1372 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1373 switch (serdes) {
1374 case ICE_PTP_SERDES_25G:
1375 link = ICE_PTP_LNK_SPD_25G_RS;
1376 break;
1377 case ICE_PTP_SERDES_50G:
1378 link = ICE_PTP_LNK_SPD_50G_RS;
1379 break;
1380 case ICE_PTP_SERDES_100G:
1381 link = ICE_PTP_LNK_SPD_100G_RS;
1382 break;
1383 default:
1384 return -EIO;
1385 }
1386 } else {
1387 switch (serdes) {
1388 case ICE_PTP_SERDES_1G:
1389 link = ICE_PTP_LNK_SPD_1G;
1390 break;
1391 case ICE_PTP_SERDES_10G:
1392 link = ICE_PTP_LNK_SPD_10G;
1393 break;
1394 case ICE_PTP_SERDES_25G:
1395 link = ICE_PTP_LNK_SPD_25G;
1396 break;
1397 case ICE_PTP_SERDES_40G:
1398 link = ICE_PTP_LNK_SPD_40G;
1399 break;
1400 case ICE_PTP_SERDES_50G:
1401 link = ICE_PTP_LNK_SPD_50G;
1402 break;
1403 default:
1404 return -EIO;
1405 }
1406 }
1407
1408 if (link_out)
1409 *link_out = link;
1410 if (fec_out)
1411 *fec_out = fec;
1412
1413 return 0;
1414}
1415
1416/**
1417 * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1418 * @hw: pointer to HW struct
 1419 * @port: the port for which to configure the quad
1420 */
1421static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1422{
1423 enum ice_ptp_link_spd link_spd;
1424 int err;
1425 u32 val;
1426 u8 quad;
1427
1428 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1429 if (err) {
1430 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1431 err);
1432 return;
1433 }
1434
1435 quad = port / ICE_PORTS_PER_QUAD;
1436
1437 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1438 if (err) {
1439 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
1440 err);
1441 return;
1442 }
1443
1444 if (link_spd >= ICE_PTP_LNK_SPD_40G)
1445 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1446 else
1447 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1448
1449 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1450 if (err) {
1451 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1452 err);
1453 return;
1454 }
1455}
1456
1457/**
1458 * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1459 * @hw: pointer to the HW structure
1460 * @port: the port to configure
1461 *
 1462 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1463 * hardware clock time units (TUs). That is, determine the number of TUs per
1464 * serdes unit interval, and program the UIX registers with this conversion.
1465 *
1466 * This conversion is used as part of the calibration process when determining
1467 * the additional error of a timestamp vs the real time of transmission or
1468 * receipt of the packet.
1469 *
1470 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1471 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1472 *
1473 * To calculate the conversion ratio, we use the following facts:
1474 *
1475 * a) the clock frequency in Hz (cycles per second)
1476 * b) the number of TUs per cycle (the increment value of the clock)
1477 * c) 1 second per 1 billion nanoseconds
1478 * d) the duration of 66 UIs in nanoseconds
1479 *
1480 * Given these facts, we can use the following table to work out what ratios
1481 * to multiply in order to get the number of TUs per 66 UIs:
1482 *
1483 * cycles | 1 second | incval (TUs) | nanoseconds
1484 * -------+--------------+--------------+-------------
1485 * second | 1 billion ns | cycle | 66 UIs
1486 *
1487 * To perform the multiplication using integers without too much loss of
 1488 * precision, we can use the following equation:
1489 *
 1490 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1491 *
1492 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1493 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1494 *
1495 * The increment value has a maximum expected range of about 34 bits, while
1496 * the frequency value is about 29 bits. Multiplying these values shouldn't
1497 * overflow the 64 bits. However, we must then further multiply them again by
1498 * the Serdes unit interval duration. To avoid overflow here, we split the
1499 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1500 * a divide by 390,625,000. This does lose some precision, but avoids
1501 * miscalculation due to arithmetic overflow.
1502 */
1503static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1504{
1505 u64 cur_freq, clk_incval, tu_per_sec, uix;
1506 int err;
1507
1508 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1509 clk_incval = ice_ptp_read_src_incval(hw);
1510
1511 /* Calculate TUs per second divided by 256 */
1512 tu_per_sec = (cur_freq * clk_incval) >> 8;
1513
1514#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1515#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1516
1517 /* Program the 10Gb/40Gb conversion ratio */
1518 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1519
1520 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1521 uix);
1522 if (err) {
1523 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1524 err);
1525 return err;
1526 }
1527
1528 /* Program the 25Gb/100Gb conversion ratio */
1529 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1530
1531 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1532 uix);
1533 if (err) {
1534 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1535 err);
1536 return err;
1537 }
1538
1539 return 0;
1540}
1541
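/*
 * Worked check of the constants above (illustrative only): the divide by
 * (100 * 1 billion) is implemented as a shift right by 8 (divide by 256)
 * followed by a divide by 390,625,000, and indeed
 * 256 * 390,625,000 = 100,000,000,000. Likewise, 66 UI at the 10G/40G serdes
 * rate is about 6.4 ns, so 6600 UI is the 640 ns used for LINE_UI_10G_40G.
 */
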
1542/**
1543 * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1544 * @hw: pointer to the HW struct
1545 * @port: port to configure
1546 *
1547 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1548 * timestamp calibration process. This depends on the link speed, as the PHY
1549 * uses different markers depending on the speed.
1550 *
1551 * 1Gb/10Gb/25Gb:
1552 * - Tx/Rx PAR/PCS markers
1553 *
1554 * 25Gb RS:
1555 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1556 *
1557 * 40Gb/50Gb:
1558 * - Tx/Rx PAR/PCS markers
1559 * - Rx Deskew PAR/PCS markers
1560 *
 1561 * 50G RS and 100G RS:
1562 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1563 * - Rx Deskew PAR/PCS markers
1564 * - Tx PAR/PCS markers
1565 *
1566 * To calculate the conversion, we use the PHC clock frequency (cycles per
1567 * second), the increment value (TUs per cycle), and the related PHY clock
1568 * frequency to calculate the TUs per unit of the PHY link clock. The
1569 * following table shows how the units convert:
1570 *
1571 * cycles | TUs | second
1572 * -------+-------+--------
1573 * second | cycle | cycles
1574 *
1575 * For each conversion register, look up the appropriate frequency from the
1576 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1577 * this to the appropriate register, preparing hardware to perform timestamp
1578 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1579 * in order to calibrate for the internal PHY delays.
1580 *
1581 * Note that the increment value ranges up to ~34 bits, and the clock
1582 * frequency is ~29 bits, so multiplying them together should fit within the
1583 * 64 bit arithmetic.
1584 */
1585static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1586{
1587 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1588 enum ice_ptp_link_spd link_spd;
1589 enum ice_ptp_fec_mode fec_mode;
1590 int err;
1591
1592 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1593 if (err)
1594 return err;
1595
1596 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1597 clk_incval = ice_ptp_read_src_incval(hw);
1598
1599	/* Calculate TUs per second of the PHC clock */
1600 tu_per_sec = cur_freq * clk_incval;
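	/* Units: (cycles / second) * (TUs / cycle) = TUs / second */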
1601
1602 /* For each PHY conversion register, look up the appropriate link
1603 * speed frequency and determine the TUs per that clock's cycle time.
1604 * Split this into a high and low value and then program the
1605 * appropriate register. If that link speed does not use the
1606 * associated register, write zeros to clear it instead.
1607 */
1608
1609 /* P_REG_PAR_TX_TUS */
1610 if (e822_vernier[link_spd].tx_par_clk)
1611 phy_tus = div_u64(tu_per_sec,
1612 e822_vernier[link_spd].tx_par_clk);
1613 else
1614 phy_tus = 0;
1615
1616 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1617 phy_tus);
1618 if (err)
1619 return err;
1620
1621 /* P_REG_PAR_RX_TUS */
1622 if (e822_vernier[link_spd].rx_par_clk)
1623 phy_tus = div_u64(tu_per_sec,
1624 e822_vernier[link_spd].rx_par_clk);
1625 else
1626 phy_tus = 0;
1627
1628 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1629 phy_tus);
1630 if (err)
1631 return err;
1632
1633 /* P_REG_PCS_TX_TUS */
1634 if (e822_vernier[link_spd].tx_pcs_clk)
1635 phy_tus = div_u64(tu_per_sec,
1636 e822_vernier[link_spd].tx_pcs_clk);
1637 else
1638 phy_tus = 0;
1639
1640 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1641 phy_tus);
1642 if (err)
1643 return err;
1644
1645 /* P_REG_PCS_RX_TUS */
1646 if (e822_vernier[link_spd].rx_pcs_clk)
1647 phy_tus = div_u64(tu_per_sec,
1648 e822_vernier[link_spd].rx_pcs_clk);
1649 else
1650 phy_tus = 0;
1651
1652 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1653 phy_tus);
1654 if (err)
1655 return err;
1656
1657 /* P_REG_DESK_PAR_TX_TUS */
1658 if (e822_vernier[link_spd].tx_desk_rsgb_par)
1659 phy_tus = div_u64(tu_per_sec,
1660 e822_vernier[link_spd].tx_desk_rsgb_par);
1661 else
1662 phy_tus = 0;
1663
1664 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1665 phy_tus);
1666 if (err)
1667 return err;
1668
1669 /* P_REG_DESK_PAR_RX_TUS */
1670 if (e822_vernier[link_spd].rx_desk_rsgb_par)
1671 phy_tus = div_u64(tu_per_sec,
1672 e822_vernier[link_spd].rx_desk_rsgb_par);
1673 else
1674 phy_tus = 0;
1675
1676 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1677 phy_tus);
1678 if (err)
1679 return err;
1680
1681 /* P_REG_DESK_PCS_TX_TUS */
1682 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1683 phy_tus = div_u64(tu_per_sec,
1684 e822_vernier[link_spd].tx_desk_rsgb_pcs);
1685 else
1686 phy_tus = 0;
1687
1688 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1689 phy_tus);
1690 if (err)
1691 return err;
1692
1693 /* P_REG_DESK_PCS_RX_TUS */
1694 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1695 phy_tus = div_u64(tu_per_sec,
1696 e822_vernier[link_spd].rx_desk_rsgb_pcs);
1697 else
1698 phy_tus = 0;
1699
1700 return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1701 phy_tus);
1702}
1703
1704/**
1705 * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1706 * @hw: pointer to the HW struct
1707 * @link_spd: the Link speed to calculate for
1708 *
1709 * Calculate the fixed offset due to known static latency data.
1710 */
1711static u64
1712ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1713{
1714 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1715
1716 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1717 clk_incval = ice_ptp_read_src_incval(hw);
1718
1719 /* Calculate TUs per second */
1720 tu_per_sec = cur_freq * clk_incval;
1721
1722 /* Calculate number of TUs to add for the fixed Tx latency. Since the
1723 * latency measurement is in 1/100th of a nanosecond, we need to
1724 * multiply by tu_per_sec and then divide by 1e11. This calculation
1725 * overflows 64 bit integer arithmetic, so break it up into two
1726 * divisions by 1e4 first then by 1e7.
1727 */
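	/* For example, a hypothetical tx_fixed_delay of 100 (i.e. 1 ns)
	 * works out to roughly tu_per_sec / 1e9 TUs, since the two
	 * divisors combine to 10000 * 10000000 = 1e11.
	 */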
1728 fixed_offset = div_u64(tu_per_sec, 10000);
1729 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1730 fixed_offset = div_u64(fixed_offset, 10000000);
1731
1732 return fixed_offset;
1733}
1734
1735/**
1736 * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1737 * @hw: pointer to the HW struct
1738 * @port: the PHY port to configure
1739 *
1740 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1741 * adjust Tx timestamps by. This is calculated by combining the known static
1742 * latency with the Vernier offset computations done by hardware.
1743 *
1744 * This function will not return successfully until the Tx offset calculations
1745 * have been completed, which requires waiting until at least one packet has
1746 * been transmitted by the device. It is safe to call this function
1747 * periodically until calibration succeeds, as it will only program the offset
1748 * once.
1749 *
1750 * To avoid overflow, when calculating the offset based on the known static
1751 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1752 * the TUs per second up front. This avoids overflow while allowing
1753 * calculation of the adjustment using integer arithmetic.
1754 *
1755 * Returns zero on success, -EBUSY if the hardware vernier offset
1756 * calibration has not completed, or another error code on failure.
1757 */
1758int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1759{
1760 enum ice_ptp_link_spd link_spd;
1761 enum ice_ptp_fec_mode fec_mode;
1762 u64 total_offset, val;
1763 int err;
1764 u32 reg;
1765
1766 /* Nothing to do if we've already programmed the offset */
1767	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
1768 if (err) {
1769 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1770 port, err);
1771 return err;
1772 }
1773
1774 if (reg)
1775 return 0;
1776
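	/* The offset valid bit is only set by hardware once the Tx Vernier
	 * measurement completes, which requires at least one transmitted
	 * packet. Until then, report -EBUSY so the caller can retry later.
	 */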
1777	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
1778 if (err) {
1779 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1780 port, err);
1781 return err;
1782 }
1783
1784 if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1785 return -EBUSY;
1786
1787 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1788 if (err)
1789 return err;
1790
1791 total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1792
1793 /* Read the first Vernier offset from the PHY register and add it to
1794 * the total offset.
1795 */
1796 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1797 link_spd == ICE_PTP_LNK_SPD_10G ||
1798 link_spd == ICE_PTP_LNK_SPD_25G ||
1799 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1800 link_spd == ICE_PTP_LNK_SPD_40G ||
1801 link_spd == ICE_PTP_LNK_SPD_50G) {
1802 err = ice_read_64b_phy_reg_e822(hw, port,
1803 P_REG_PAR_PCS_TX_OFFSET_L,
1804 &val);
1805 if (err)
1806 return err;
1807
1808 total_offset += val;
1809 }
1810
1811 /* For Tx, we only need to use the second Vernier offset for
1812 * multi-lane link speeds with RS-FEC. The lanes will always be
1813 * aligned.
1814 */
1815 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1816 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1817 err = ice_read_64b_phy_reg_e822(hw, port,
1818 P_REG_PAR_TX_TIME_L,
1819 &val);
1820 if (err)
1821 return err;
1822
1823 total_offset += val;
1824 }
1825
1826 /* Now that the total offset has been calculated, program it to the
1827 * PHY and indicate that the Tx offset is ready. After this,
1828 * timestamps will be enabled.
1829 */
1830 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1831 total_offset);
1832 if (err)
1833 return err;
1834
1835 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1836 if (err)
1837 return err;
1838
1839 dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
1840 port);
1841
1842 return 0;
1843}
1844
1845/**
1846 * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1847 * @hw: pointer to the HW struct
1848 * @port: the PHY port to adjust for
1849 * @link_spd: the current link speed of the PHY
1850 * @fec_mode: the current FEC mode of the PHY
1851 * @pmd_adj: on return, the amount to adjust the Rx total offset by
1852 *
1853 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1854 * This varies by link speed and FEC mode. The value calculated accounts for
1855 * various delays caused when receiving a packet.
1856 */
1857static int
1858ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1859 enum ice_ptp_link_spd link_spd,
1860 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1861{
1862 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1863 u8 pmd_align;
1864 u32 val;
1865 int err;
1866
1867 err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1868 if (err) {
1869 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1870 err);
1871 return err;
1872 }
1873
1874 pmd_align = (u8)val;
1875
1876 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1877 clk_incval = ice_ptp_read_src_incval(hw);
1878
1879 /* Calculate TUs per second */
1880 tu_per_sec = cur_freq * clk_incval;
1881
1882 /* The PMD alignment adjustment measurement depends on the link speed,
1883 * and whether FEC is enabled. For each link speed, the alignment
1884 * adjustment is calculated by dividing a value by the length of
1885 * a Time Unit in nanoseconds.
1886 *
1887	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1888 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1889 * 10G w/FEC: align * 0.1 * 32/33
1890 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1891 * 25G w/FEC: align * 0.4 * 32/33
1892 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1893 * 40G w/FEC: align * 0.1 * 32/33
1894 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1895 * 50G w/FEC: align * 0.8 * 32/33
1896 *
1897 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1898 *
1899 * To allow for calculating this value using integer arithmetic, we
1900	 * instead start with the number of TUs per second (the inverse of the
1901 * length of a Time Unit in nanoseconds), multiply by a value based
1902 * on the PMD alignment register, and then divide by the right value
1903 * calculated based on the table above. To avoid integer overflow this
1904 * division is broken up into a step of dividing by 125 first.
1905 */
1906 if (link_spd == ICE_PTP_LNK_SPD_1G) {
1907 if (pmd_align == 4)
1908 mult = 10;
1909 else
1910 mult = (pmd_align + 6) % 10;
1911 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1912 link_spd == ICE_PTP_LNK_SPD_25G ||
1913 link_spd == ICE_PTP_LNK_SPD_40G ||
1914 link_spd == ICE_PTP_LNK_SPD_50G) {
1915 /* If Clause 74 FEC, always calculate PMD adjust */
1916 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1917 mult = pmd_align;
1918 else
1919 mult = 0;
1920 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1921 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1922 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1923 if (pmd_align < 17)
1924 mult = pmd_align + 40;
1925 else
1926 mult = pmd_align;
1927 } else {
1928 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1929 link_spd);
1930 mult = 0;
1931 }
1932
1933 /* In some cases, there's no need to adjust for the PMD alignment */
1934 if (!mult) {
1935 *pmd_adj = 0;
1936 return 0;
1937 }
1938
1939 /* Calculate the adjustment by multiplying TUs per second by the
1940 * appropriate multiplier and divisor. To avoid overflow, we first
1941 * divide by 125, and then handle remaining divisor based on the link
1942 * speed pmd_adj_divisor value.
1943 */
1944 adj = div_u64(tu_per_sec, 125);
1945 adj *= mult;
1946 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
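	/* adj now holds the PMD alignment delay from the table above,
	 * converted from nanoseconds into TUs.
	 */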
1947
1948 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1949 * cycle count is necessary.
1950 */
1951 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1952 u64 cycle_adj;
1953 u8 rx_cycle;
1954
1955 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1956 &val);
1957 if (err) {
1958 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1959 err);
1960 return err;
1961 }
1962
1963 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
1964 if (rx_cycle) {
1965 mult = (4 - rx_cycle) * 40;
1966
1967 cycle_adj = div_u64(tu_per_sec, 125);
1968 cycle_adj *= mult;
1969 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1970
1971 adj += cycle_adj;
1972 }
1973 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
1974 u64 cycle_adj;
1975 u8 rx_cycle;
1976
1977 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
1978 &val);
1979 if (err) {
1980 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
1981 err);
1982 return err;
1983 }
1984
1985 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
1986 if (rx_cycle) {
1987 mult = rx_cycle * 40;
1988
1989 cycle_adj = div_u64(tu_per_sec, 125);
1990 cycle_adj *= mult;
1991 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1992
1993 adj += cycle_adj;
1994 }
1995 }
1996
1997 /* Return the calculated adjustment */
1998 *pmd_adj = adj;
1999
2000 return 0;
2001}
2002
2003/**
2004 * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
2005 * @hw: pointer to HW struct
2006 * @link_spd: The Link speed to calculate for
2007 *
2008 * Determine the fixed Rx latency for a given link speed.
2009 */
2010static u64
2011ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2012{
2013 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2014
2015 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
2016 clk_incval = ice_ptp_read_src_incval(hw);
2017
2018 /* Calculate TUs per second */
2019 tu_per_sec = cur_freq * clk_incval;
2020
2021 /* Calculate number of TUs to add for the fixed Rx latency. Since the
2022 * latency measurement is in 1/100th of a nanosecond, we need to
2023 * multiply by tu_per_sec and then divide by 1e11. This calculation
2024 * overflows 64 bit integer arithmetic, so break it up into two
2025 * divisions by 1e4 first then by 1e7.
2026 */
2027 fixed_offset = div_u64(tu_per_sec, 10000);
2028 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2029 fixed_offset = div_u64(fixed_offset, 10000000);
2030
2031 return fixed_offset;
2032}
2033
2034/**
2035 * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
2036 * @hw: pointer to the HW struct
2037 * @port: the PHY port to configure
2038 *
2039 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2040 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2041 * measurements taken in hardware with known fixed delay data, as well as an
2042 * adjustment for multi-lane alignment delay.
2043 *
2044 * This function will not return successfully until the Rx offset calculations
2045 * have been completed, which requires waiting until at least one packet has
2046 * been received by the device. It is safe to call this function periodically
2047 * until calibration succeeds, as it will only program the offset once.
2048 *
2049 * This function must be called only after the offset registers are valid,
2050 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2051 * has measured the offset.
2052 *
2053 * To avoid overflow, when calculating the offset based on the known static
2054 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2055 * the TUs per second up front. This avoids overflow while allowing
2056 * calculation of the adjustment using integer arithmetic.
2057 *
2058 * Returns zero on success, -EBUSY if the hardware vernier offset
2059 * calibration has not completed, or another error code on failure.
2060 */
2061int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
2062{
2063 enum ice_ptp_link_spd link_spd;
2064 enum ice_ptp_fec_mode fec_mode;
2065 u64 total_offset, pmd, val;
2066 int err;
2067 u32 reg;
2068
2069 /* Nothing to do if we've already programmed the offset */
2070	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
2071 if (err) {
2072 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2073 port, err);
2074 return err;
2075 }
2076
2077 if (reg)
2078 return 0;
2079
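	/* As on the Tx side, the offset valid bit is set by hardware only
	 * after the Rx Vernier measurement completes, which requires at
	 * least one received packet. Until then, report -EBUSY so the
	 * caller can retry later.
	 */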
2080	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
2081 if (err) {
2082 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2083 port, err);
2084 return err;
2085 }
2086
2087 if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2088 return -EBUSY;
2089
2090 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2091 if (err)
2092 return err;
2093
2094 total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2095
2096 /* Read the first Vernier offset from the PHY register and add it to
2097 * the total offset.
2098 */
2099 err = ice_read_64b_phy_reg_e822(hw, port,
2100 P_REG_PAR_PCS_RX_OFFSET_L,
2101 &val);
2102 if (err)
2103 return err;
2104
2105 total_offset += val;
2106
2107 /* For Rx, all multi-lane link speeds include a second Vernier
2108 * calibration, because the lanes might not be aligned.
2109 */
2110 if (link_spd == ICE_PTP_LNK_SPD_40G ||
2111 link_spd == ICE_PTP_LNK_SPD_50G ||
2112 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2113 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2114 err = ice_read_64b_phy_reg_e822(hw, port,
2115 P_REG_PAR_RX_TIME_L,
2116 &val);
2117 if (err)
2118 return err;
2119
2120 total_offset += val;
2121 }
2122
2123 /* In addition, Rx must account for the PMD alignment */
2124 err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2125 if (err)
2126 return err;
2127
2128 /* For RS-FEC, this adjustment adds delay, but for other modes, it
2129 * subtracts delay.
2130 */
2131 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2132 total_offset += pmd;
2133 else
2134 total_offset -= pmd;
2135
2136 /* Now that the total offset has been calculated, program it to the
2137 * PHY and indicate that the Rx offset is ready. After this,
2138 * timestamps will be enabled.
2139 */
2140 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2141 total_offset);
2142 if (err)
2143 return err;
2144
2145 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2146 if (err)
2147 return err;
2148
2149 dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
2150 port);
2151
2152 return 0;
2153}
2154
2155/**
2156 * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2157 * @hw: pointer to the HW struct
2158 * @port: the PHY port to read
2159 * @phy_time: on return, the 64bit PHY timer value
2160 * @phc_time: on return, the lower 64bits of PHC time
2161 *
2162 * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2163 * timer values.
2164 */
2165static int
2166ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2167 u64 *phc_time)
2168{
2169 u64 tx_time, rx_time;
2170 u32 zo, lo;
2171 u8 tmr_idx;
2172 int err;
2173
2174 tmr_idx = ice_get_ptp_src_clock_index(hw);
2175
2176 /* Prepare the PHC timer for a READ_TIME capture command */
2177 ice_ptp_src_cmd(hw, READ_TIME);
2178
2179 /* Prepare the PHY timer for a READ_TIME capture command */
2180 err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2181 if (err)
2182 return err;
2183
2184 /* Issue the sync to start the READ_TIME capture */
2185 ice_ptp_exec_tmr_cmd(hw);
2186
2187 /* Read the captured PHC time from the shadow time registers */
2188 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2189 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2190 *phc_time = (u64)lo << 32 | zo;
2191
2192 /* Read the captured PHY time from the PHY shadow registers */
2193 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2194 if (err)
2195 return err;
2196
2197 /* If the PHY Tx and Rx timers don't match, log a warning message.
2198 * Note that this should not happen in normal circumstances since the
2199 * driver always programs them together.
2200 */
2201 if (tx_time != rx_time)
2202 dev_warn(ice_hw_to_dev(hw),
2203 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2204 port, (unsigned long long)tx_time,
2205 (unsigned long long)rx_time);
2206
2207 *phy_time = tx_time;
2208
2209 return 0;
2210}
2211
2212/**
2213 * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2214 * @hw: pointer to the HW struct
2215 * @port: the PHY port to synchronize
2216 *
2217 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2218 * This is done by issuing a READ_TIME command which triggers a simultaneous
2219 * read of the PHY timer and PHC timer. Then we use the difference to
2220 * calculate an appropriate 2s complement addition to add to the PHY timer in
2221 * order to ensure it reads the same value as the primary PHC timer.
2222 */
2223static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2224{
2225 u64 phc_time, phy_time, difference;
2226 int err;
2227
2228 if (!ice_ptp_lock(hw)) {
2229 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2230 return -EBUSY;
2231 }
2232
2233 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2234 if (err)
2235 goto err_unlock;
2236
2237 /* Calculate the amount required to add to the port time in order for
2238 * it to match the PHC time.
2239 *
2240 * Note that the port adjustment is done using 2s complement
2241 * arithmetic. This is convenient since it means that we can simply
2242 * calculate the difference between the PHC time and the port time,
2243 * and it will be interpreted correctly.
2244 */
2245 difference = phc_time - phy_time;
2246
2247 err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2248 if (err)
2249 goto err_unlock;
2250
2251 err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2252 if (err)
2253 goto err_unlock;
2254
2255 /* Issue the sync to activate the time adjustment */
2256 ice_ptp_exec_tmr_cmd(hw);
2257
2258 /* Re-capture the timer values to flush the command registers and
2259 * verify that the time was properly adjusted.
2260 */
2261 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2262 if (err)
2263 goto err_unlock;
2264
2265 dev_info(ice_hw_to_dev(hw),
2266 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2267 port, (unsigned long long)phy_time,
2268 (unsigned long long)phc_time);
2269
2270 ice_ptp_unlock(hw);
2271
2272 return 0;
2273
2274err_unlock:
2275 ice_ptp_unlock(hw);
2276 return err;
2277}
2278
2279/**
2280 * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2281 * @hw: pointer to the HW struct
2282 * @port: the PHY port to stop
2283 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2284 *
2285 * Stop the clock of a PHY port. This must be done as part of the flow to
2286 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2287 * initialized or when link speed changes.
2288 */
2289int
2290ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2291{
2292 int err;
2293 u32 val;
2294
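	/* Clear the Tx and Rx offset ready bits so that the total offsets
	 * are recalculated and reprogrammed the next time this port timer
	 * is started.
	 */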
2295 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2296 if (err)
2297 return err;
2298
2299 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2300 if (err)
2301 return err;
2302
2303 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2304 if (err)
2305 return err;
2306
2307 val &= ~P_REG_PS_START_M;
2308 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2309 if (err)
2310 return err;
2311
2312 val &= ~P_REG_PS_ENA_CLK_M;
2313 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2314 if (err)
2315 return err;
2316
2317 if (soft_reset) {
2318 val |= P_REG_PS_SFT_RESET_M;
2319 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2320 if (err)
2321 return err;
2322 }
2323
2324 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2325
2326 return 0;
2327}
2328
2329/**
2330 * ice_start_phy_timer_e822 - Start the PHY clock timer
2331 * @hw: pointer to the HW struct
2332 * @port: the PHY port to start
2333 *
2334 * Start the clock of a PHY port. This must be done as part of the flow to
2335 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2336 * initialized or when link speed changes.
2337 *
2338 * Hardware will take Vernier measurements on Tx or Rx of packets.
2339 */
2340int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
2341{
2342 u32 lo, hi, val;
2343 u64 incval;
2344 u8 tmr_idx;
2345 int err;
2346
2347 tmr_idx = ice_get_ptp_src_clock_index(hw);
2348
2349 err = ice_stop_phy_timer_e822(hw, port, false);
2350 if (err)
2351 return err;
2352
2353 ice_phy_cfg_lane_e822(hw, port);
2354
2355 err = ice_phy_cfg_uix_e822(hw, port);
2356 if (err)
2357 return err;
2358
2359 err = ice_phy_cfg_parpcs_e822(hw, port);
2360 if (err)
2361 return err;
2362
2363 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2364 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2365 incval = (u64)hi << 32 | lo;
2366
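	/* Program the port timer with the same increment value as the
	 * source timer, so the PHY timer advances at the PHC rate.
	 */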
2367 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2368 if (err)
2369 return err;
2370
2371 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2372 if (err)
2373 return err;
2374
2375 ice_ptp_exec_tmr_cmd(hw);
2376
2377 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2378 if (err)
2379 return err;
2380
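	/* Bring the port timer out of reset in a known state: assert the
	 * soft reset bit, set the START bit while reset is held, then
	 * release the soft reset.
	 */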
2381 val |= P_REG_PS_SFT_RESET_M;
2382 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2383 if (err)
2384 return err;
2385
2386 val |= P_REG_PS_START_M;
2387 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2388 if (err)
2389 return err;
2390
2391 val &= ~P_REG_PS_SFT_RESET_M;
2392 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2393 if (err)
2394 return err;
2395
2396 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2397 if (err)
2398 return err;
2399
2400 ice_ptp_exec_tmr_cmd(hw);
2401
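	/* Finally, enable the port clock and set the offset load bit
	 * (which, as the name suggests, causes the total offset registers
	 * to be loaded) before issuing one more timer command.
	 */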
2402 val |= P_REG_PS_ENA_CLK_M;
2403 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2404 if (err)
2405 return err;
2406
2407 val |= P_REG_PS_LOAD_OFFSET_M;
2408 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2409 if (err)
2410 return err;
2411
2412 ice_ptp_exec_tmr_cmd(hw);
2413
2414 err = ice_sync_phy_timer_e822(hw, port);
2415 if (err)
2416 return err;
2417
2418 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2419
2420 return 0;
2421}
2422
2423/**
2424 * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
2425 * @hw: pointer to the HW struct
2426 * @quad: the timestamp quad to read from
2427 * @tstamp_ready: contents of the Tx memory status register
2428 *
2429 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2430 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2431 * ready to be captured from the PHY timestamp block.
2432 */
2433static int
2434ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2435{
2436 u32 hi, lo;
2437 int err;
2438
2439 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2440 if (err) {
2441 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2442 quad, err);
2443 return err;
2444 }
2445
2446 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2447 if (err) {
2448 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2449 quad, err);
2450 return err;
2451 }
2452
2453 *tstamp_ready = (u64)hi << 32 | (u64)lo;
2454
2455 return 0;
2456}
2457
2458/* E810 functions
2459 *
2460 * The following functions operate on the E810 series devices which use
2461 * a separate external PHY.
2462 */
2463
2464/**
2465 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2466 * @hw: pointer to the HW struct
2467 * @addr: the address to read from
2468 * @val: On return, the value read from the PHY
2469 *
2470 * Read a register from the external PHY on the E810 device.
2471 */
2472static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2473{
2474 struct ice_sbq_msg_input msg = {0};
2475 int err;
2476
2477 msg.msg_addr_low = lower_16_bits(addr);
2478 msg.msg_addr_high = upper_16_bits(addr);
2479 msg.opcode = ice_sbq_msg_rd;
2480 msg.dest_dev = rmn_0;
2481
2482 err = ice_sbq_rw_reg(hw, &msg);
2483 if (err) {
2484 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2485 err);
2486 return err;
2487 }
2488
2489 *val = msg.data;
2490
2491 return 0;
2492}
2493
2494/**
2495 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2496 * @hw: pointer to the HW struct
2497 * @addr: the address to write to
2498 * @val: the value to write to the PHY
2499 *
2500 * Write a value to a register of the external PHY on the E810 device.
2501 */
2502static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2503{
2504 struct ice_sbq_msg_input msg = {0};
2505 int err;
2506
2507 msg.msg_addr_low = lower_16_bits(addr);
2508 msg.msg_addr_high = upper_16_bits(addr);
2509 msg.opcode = ice_sbq_msg_wr;
2510 msg.dest_dev = rmn_0;
2511 msg.data = val;
2512
2513 err = ice_sbq_rw_reg(hw, &msg);
2514 if (err) {
2515 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2516 err);
2517 return err;
2518 }
2519
2520 return 0;
2521}
2522
2523/**
2524 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2525 * @hw: pointer to the HW struct
2526 * @idx: the timestamp index to read
2527 * @hi: 8 bit timestamp high value
2528 * @lo: 32 bit timestamp low value
2529 *
2530 * Read an 8bit timestamp high value and a 32 bit timestamp low value out of the
2531 * timestamp block of the external PHY on the E810 device using the low latency
2532 * timestamp read.
2533 */
2534static int
2535ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2536{
2537 u32 val;
2538 u8 i;
2539
2540 /* Write TS index to read to the PF register so the FW can read it */
2541 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2542 wr32(hw, PF_SB_ATQBAL, val);
2543
2544 /* Read the register repeatedly until the FW provides us the TS */
2545 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2546 val = rd32(hw, PF_SB_ATQBAL);
2547
2548 /* When the bit is cleared, the TS is ready in the register */
2549 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2550 /* High 8 bit value of the TS is on the bits 16:23 */
2551 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2552
2553 /* Read the low 32 bit value and set the TS valid bit */
2554 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2555 return 0;
2556 }
2557
2558 udelay(10);
2559 }
2560
2561 /* FW failed to provide the TS in time */
2562 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2563 return -EINVAL;
2564}
2565
2566/**
2567 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2568 * @hw: pointer to the HW struct
2569 * @lport: the lport to read from
2570 * @idx: the timestamp index to read
2571 * @hi: 8 bit timestamp high value
2572 * @lo: 32 bit timestamp low value
2573 *
2574 * Read an 8bit timestamp high value and a 32 bit timestamp low value out of the
2575 * timestamp block of the external PHY on the E810 device using sideband queue.
2576 */
2577static int
2578ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2579 u32 *lo)
2580{
2581 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2582 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2583 u32 lo_val, hi_val;
2584 int err;
2585
2586 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2587 if (err) {
2588 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2589 err);
2590 return err;
2591 }
2592
2593 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2594 if (err) {
2595 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2596 err);
2597 return err;
2598 }
2599
2600 *lo = lo_val;
2601 *hi = (u8)hi_val;
2602
2603 return 0;
2604}
2605
2606/**
2607 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2608 * @hw: pointer to the HW struct
2609 * @lport: the lport to read from
2610 * @idx: the timestamp index to read
2611 * @tstamp: on return, the 40bit timestamp value
2612 *
2613 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2614 * on the E810 device.
2615 */
2616static int
2617ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2618{
2619 u32 lo = 0;
2620 u8 hi = 0;
2621 int err;
2622
2623 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2624 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2625 else
2626 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2627
2628 if (err)
2629 return err;
2630
2631 /* For E810 devices, the timestamp is reported with the lower 32 bits
2632 * in the low register, and the upper 8 bits in the high register.
2633 */
2634 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2635
2636 return 0;
2637}
2638
2639/**
2640 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2641 * @hw: pointer to the HW struct
2642 * @lport: the lport to read from
2643 * @idx: the timestamp index to reset
2644 *
2645 * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2646 * external PHY on the E810 device.
2647 */
2648static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2649{
2650 u32 lo_addr, hi_addr;
2651 int err;
2652
2653 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2654 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2655
2656 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2657 if (err) {
2658 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2659 err);
2660 return err;
2661 }
2662
2663 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2664 if (err) {
2665 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2666 err);
2667 return err;
2668 }
2669
2670 return 0;
2671}
2672
2673/**
2674 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2675 * @hw: pointer to HW struct
2676 *
2677 * Enable the timesync PTP functionality for the external PHY connected to
2678 * this function.
2679 */
2680int ice_ptp_init_phy_e810(struct ice_hw *hw)
2681{
2682 u8 tmr_idx;
2683 int err;
2684
2685 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2686 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2687 GLTSYN_ENA_TSYN_ENA_M);
2688 if (err)
2689 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2690 err);
2691
2692 return err;
2693}
2694
2695/**
2696 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2697 * @hw: pointer to HW struct
2698 *
2699 * Perform E810-specific PTP hardware clock initialization steps.
2700 */
2701static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2702{
2703 /* Ensure synchronization delay is zero */
2704 wr32(hw, GLTSYN_SYNC_DLAY, 0);
2705
2706 /* Initialize the PHY */
2707 return ice_ptp_init_phy_e810(hw);
2708}
2709
2710/**
2711 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2712 * @hw: Board private structure
2713 * @time: Time to initialize the PHY port clock to
2714 *
2715 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
2716 * initial clock time. The time will not actually be programmed until the
2717 * driver issues an INIT_TIME command.
2718 *
2719 * The time value is the upper 32 bits of the PHY timer, usually in units of
2720 * nominal nanoseconds.
2721 */
2722static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2723{
2724 u8 tmr_idx;
2725 int err;
2726
2727 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2728 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2729 if (err) {
2730 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2731 err);
2732 return err;
2733 }
2734
2735 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2736 if (err) {
2737 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2738 err);
2739 return err;
2740 }
2741
2742 return 0;
2743}
2744
2745/**
2746 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2747 * @hw: pointer to HW struct
2748 * @adj: adjustment value to program
2749 *
2750 * Prepare the PHY port for an atomic adjustment by programming the PHY
2751 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2752 * is completed by issuing an ADJ_TIME sync command.
2753 *
2754 * The adjustment value only contains the portion used for the upper 32bits of
2755 * the PHY timer, usually in units of nominal nanoseconds. Negative
2756 * adjustments are supported using 2s complement arithmetic.
2757 */
2758static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2759{
2760 u8 tmr_idx;
2761 int err;
2762
2763 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2764
2765 /* Adjustments are represented as signed 2's complement values in
2766 * nanoseconds. Sub-nanosecond adjustment is not supported.
2767 */
2768 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2769 if (err) {
2770 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2771 err);
2772 return err;
2773 }
2774
2775 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2776 if (err) {
2777 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2778 err);
2779 return err;
2780 }
2781
2782 return 0;
2783}
2784
2785/**
2786 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2787 * @hw: pointer to HW struct
2788 * @incval: The new 40bit increment value to prepare
2789 *
2790 * Prepare the PHY port for a new increment value by programming the PHY
2791 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2792 * completed by issuing an INIT_INCVAL command.
2793 */
2794static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2795{
2796 u32 high, low;
2797 u8 tmr_idx;
2798 int err;
2799
2800 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2801 low = lower_32_bits(incval);
2802 high = upper_32_bits(incval);
2803
2804 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2805 if (err) {
2806 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2807 err);
2808 return err;
2809 }
2810
2811 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2812 if (err) {
2813 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2814 err);
2815 return err;
2816 }
2817
2818 return 0;
2819}
2820
2821/**
2822 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2823 * @hw: pointer to HW struct
2824 * @cmd: Command to be sent to the port
2825 *
2826 * Prepare the external PHYs connected to this device for a timer sync
2827 * command.
2828 */
2829static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2830{
2831 u32 cmd_val, val;
2832 int err;
2833
2834 switch (cmd) {
2835 case INIT_TIME:
2836 cmd_val = GLTSYN_CMD_INIT_TIME;
2837 break;
2838 case INIT_INCVAL:
2839 cmd_val = GLTSYN_CMD_INIT_INCVAL;
2840 break;
2841 case ADJ_TIME:
2842 cmd_val = GLTSYN_CMD_ADJ_TIME;
2843 break;
2844 case READ_TIME:
2845 cmd_val = GLTSYN_CMD_READ_TIME;
2846 break;
2847 case ADJ_TIME_AT_TIME:
2848 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2849 break;
2850 }
2851
2852 /* Read, modify, write */
2853 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2854 if (err) {
2855 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2856 return err;
2857 }
2858
2859 /* Modify necessary bits only and perform write */
2860 val &= ~TS_CMD_MASK_E810;
2861 val |= cmd_val;
2862
2863 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2864 if (err) {
2865 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2866 return err;
2867 }
2868
2869 return 0;
2870}
2871
2872/* Device agnostic functions
2873 *
2874 * The following functions implement shared behavior common to both E822 and
2875 * E810 devices, possibly calling a device specific implementation where
2876 * necessary.
2877 */
2878
2879/**
2880 * ice_ptp_lock - Acquire PTP global semaphore register lock
2881 * @hw: pointer to the HW struct
2882 *
2883 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
2884 * was acquired, false otherwise.
2885 *
2886 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
2887 * value. If software sees the busy bit cleared, this means that this function
2888 * acquired the lock (and the busy bit is now set). If software sees the busy
2889 * bit set, it means that another function acquired the lock.
2890 *
2891 * Software must clear the busy bit with a write to release the lock for other
2892 * functions when done.
2893 */
2894bool ice_ptp_lock(struct ice_hw *hw)
2895{
2896 u32 hw_lock;
2897 int i;
2898
2899#define MAX_TRIES 15
2900
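	/* Poll the semaphore up to MAX_TRIES times, sleeping roughly 5 ms
	 * between attempts (up to ~90 ms in total), before giving up.
	 */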
2901 for (i = 0; i < MAX_TRIES; i++) {
2902 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2903 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
2904 if (hw_lock) {
2905 /* Somebody is holding the lock */
2906 usleep_range(5000, 6000);
2907 continue;
2908 }
2909
2910 break;
2911 }
2912
2913 return !hw_lock;
2914}
2915
2916/**
2917 * ice_ptp_unlock - Release PTP global semaphore register lock
2918 * @hw: pointer to the HW struct
2919 *
2920 * Release the global PTP hardware semaphore lock. This is done by writing to
2921 * the PFTSYN_SEM register.
2922 */
2923void ice_ptp_unlock(struct ice_hw *hw)
2924{
2925 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
2926}
2927
2928/**
2929 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
2930 * @hw: pointer to HW struct
2931 * @cmd: the command to issue
2932 *
2933 * Prepare the source timer and PHY timers and then trigger the requested
2934 * command. This causes the shadow registers previously written in preparation
2935 * for the command to be synchronously applied to both the source and PHY
2936 * timers.
2937 */
2938static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2939{
2940 int err;
2941
2942 /* First, prepare the source timer */
2943 ice_ptp_src_cmd(hw, cmd);
2944
2945 /* Next, prepare the ports */
2946 if (ice_is_e810(hw))
2947 err = ice_ptp_port_cmd_e810(hw, cmd);
2948 else
2949 err = ice_ptp_port_cmd_e822(hw, cmd);
2950 if (err) {
2951 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
2952 cmd, err);
2953 return err;
2954 }
2955
2956 /* Write the sync command register to drive both source and PHY timer
2957 * commands synchronously
2958 */
2959 ice_ptp_exec_tmr_cmd(hw);
2960
2961 return 0;
2962}
2963
2964/**
2965 * ice_ptp_init_time - Initialize device time to provided value
2966 * @hw: pointer to HW struct
2967 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
2968 *
2969 * Initialize the device to the specified time provided. This requires a three
2970 * step process:
2971 *
2972 * 1) write the new init time to the source timer shadow registers
2973 * 2) write the new init time to the PHY timer shadow registers
2974 * 3) issue an init_time timer command to synchronously switch both the source
2975 * and port timers to the new init time value at the next clock cycle.
2976 */
2977int ice_ptp_init_time(struct ice_hw *hw, u64 time)
2978{
2979 u8 tmr_idx;
2980 int err;
2981
2982 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2983
2984 /* Source timers */
2985 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
2986 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
2987 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
2988
2989 /* PHY timers */
2990 /* Fill Rx and Tx ports and send msg to PHY */
2991 if (ice_is_e810(hw))
2992 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
2993 else
2994 err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
2995 if (err)
2996 return err;
2997
2998 return ice_ptp_tmr_cmd(hw, INIT_TIME);
2999}
3000
3001/**
3002 * ice_ptp_write_incval - Program PHC with new increment value
3003 * @hw: pointer to HW struct
3004 * @incval: Source timer increment value per clock cycle
3005 *
3006 * Program the PHC with a new increment value. This requires a three-step
3007 * process:
3008 *
3009 * 1) Write the increment value to the source timer shadow registers
3010 * 2) Write the increment value to the PHY timer shadow registers
3011 * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
3012 * source and port timers to the new increment value at the next clock
3013 * cycle.
3014 */
3015int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3016{
3017 u8 tmr_idx;
3018 int err;
3019
3020 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3021
3022 /* Shadow Adjust */
3023 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3024 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3025
3026 if (ice_is_e810(hw))
3027 err = ice_ptp_prep_phy_incval_e810(hw, incval);
3028 else
3029 err = ice_ptp_prep_phy_incval_e822(hw, incval);
3030 if (err)
3031 return err;
3032
3033 return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
3034}
3035
3036/**
3037 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3038 * @hw: pointer to HW struct
3039 * @incval: Source timer increment value per clock cycle
3040 *
3041 * Program a new PHC incval while holding the PTP semaphore.
3042 */
3043int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3044{
3045 int err;
3046
3047 if (!ice_ptp_lock(hw))
3048 return -EBUSY;
3049
3050 err = ice_ptp_write_incval(hw, incval);
3051
3052 ice_ptp_unlock(hw);
3053
3054 return err;
3055}
3056
3057/**
3058 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3059 * @hw: pointer to HW struct
3060 * @adj: Adjustment in nanoseconds
3061 *
3062 * Perform an atomic adjustment of the PHC time by the specified number of
3063 * nanoseconds. This requires a three-step process:
3064 *
3065 * 1) Write the adjustment to the source timer shadow registers
3066 * 2) Write the adjustment to the PHY timer shadow registers
3067 * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3068 * both the source and port timers at the next clock cycle.
3069 */
3070int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3071{
3072 u8 tmr_idx;
3073 int err;
3074
3075 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3076
3077 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3078 * For an ADJ_TIME command, this set of registers represents the value
3079 * to add to the clock time. It supports subtraction by interpreting
3080 * the value as a 2's complement integer.
3081 */
3082 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3083 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3084
3085 if (ice_is_e810(hw))
3086 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3087 else
3088 err = ice_ptp_prep_phy_adj_e822(hw, adj);
3089 if (err)
3090 return err;
3091
3092 return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3093}
3094
3095/**
3096 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3097 * @hw: pointer to the HW struct
3098 * @block: the block to read from
3099 * @idx: the timestamp index to read
3100 * @tstamp: on return, the 40bit timestamp value
3101 *
3102 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3103 * the block is the quad to read from. For E810 devices, the block is the
3104 * logical port to read from.
3105 */
3106int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3107{
3108 if (ice_is_e810(hw))
3109 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3110 else
3111 return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3112}
3113
3114/**
3115 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3116 * @hw: pointer to the HW struct
3117 * @block: the block to read from
3118 * @idx: the timestamp index to reset
3119 *
3120 * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3121 * E822 devices, the block is the quad to clear from. For E810 devices, the
3122 * block is the logical port to clear from.
3123 */
3124int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3125{
3126 if (ice_is_e810(hw))
3127 return ice_clear_phy_tstamp_e810(hw, block, idx);
3128 else
3129 return ice_clear_phy_tstamp_e822(hw, block, idx);
3130}
3131
3132/**
3133 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
3134 * @hw: pointer to the HW struct
3135 * @port: the PHY port to read
3136 * @tstamp_ready: contents of the Tx memory status register
3137 *
3138 * E810 devices do not use a Tx memory status register. Instead simply
3139 * indicate that all timestamps are currently ready.
3140 */
3141static int
3142ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
3143{
3144 *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
3145 return 0;
3146}
3147
3148/* E810T SMA functions
3149 *
3150 * The following functions operate specifically on E810T hardware and are used
3151 * to access the extended GPIOs available.
3152 */
3153
3154/**
3155 * ice_get_pca9575_handle
3156 * @hw: pointer to the hw struct
3157 * @pca9575_handle: GPIO controller's handle
3158 *
3159 * Find and return the GPIO controller's handle in the netlist.
3160 * Once found, the handle is cached in the HW structure and subsequent calls
3161 * return the cached value.
3162 */
3163static int
3164ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3165{
3166 struct ice_aqc_get_link_topo *cmd;
3167 struct ice_aq_desc desc;
3168 int status;
3169 u8 idx;
3170
3171 /* If handle was read previously return cached value */
3172 if (hw->io_expander_handle) {
3173 *pca9575_handle = hw->io_expander_handle;
3174 return 0;
3175 }
3176
3177 /* If handle was not detected read it from the netlist */
3178 cmd = &desc.params.get_link_topo;
3179 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3180
3181 /* Set node type to GPIO controller */
3182 cmd->addr.topo_params.node_type_ctx =
3183 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3184 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3185
3186#define SW_PCA9575_SFP_TOPO_IDX 2
3187#define SW_PCA9575_QSFP_TOPO_IDX 1
3188
3189 /* Check if the SW IO expander controlling SMA exists in the netlist. */
3190 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3191 idx = SW_PCA9575_SFP_TOPO_IDX;
3192 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3193 idx = SW_PCA9575_QSFP_TOPO_IDX;
3194 else
3195 return -EOPNOTSUPP;
3196
3197 cmd->addr.topo_params.index = idx;
3198
3199 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3200 if (status)
3201 return -EOPNOTSUPP;
3202
3203 /* Verify if we found the right IO expander type */
3204 if (desc.params.get_link_topo.node_part_num !=
3205 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3206 return -EOPNOTSUPP;
3207
3208 /* If present save the handle and return it */
3209 hw->io_expander_handle =
3210 le16_to_cpu(desc.params.get_link_topo.addr.handle);
3211 *pca9575_handle = hw->io_expander_handle;
3212
3213 return 0;
3214}
3215
3216/**
3217 * ice_read_sma_ctrl_e810t
3218 * @hw: pointer to the hw struct
3219 * @data: pointer to data to be read from the GPIO controller
3220 *
3221 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3222 * PCA9575 expander, so only bits 3-7 in data are valid.
3223 */
3224int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3225{
3226 int status;
3227 u16 handle;
3228 u8 i;
3229
3230 status = ice_get_pca9575_handle(hw, &handle);
3231 if (status)
3232 return status;
3233
3234 *data = 0;
3235
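	/* Read back each SMA control pin. The GPIO state is inverted when
	 * building the returned bitmap, mirroring the inversion applied in
	 * ice_write_sma_ctrl_e810t().
	 */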
3236 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3237 bool pin;
3238
3239 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3240 &pin, NULL);
3241 if (status)
3242 break;
3243 *data |= (u8)(!pin) << i;
3244 }
3245
3246 return status;
3247}
3248
3249/**
3250 * ice_write_sma_ctrl_e810t
3251 * @hw: pointer to the hw struct
3252 * @data: data to be written to the GPIO controller
3253 *
3254 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3255 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3256 */
3257int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3258{
3259 int status;
3260 u16 handle;
3261 u8 i;
3262
3263 status = ice_get_pca9575_handle(hw, &handle);
3264 if (status)
3265 return status;
3266
3267 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3268 bool pin;
3269
3270 pin = !(data & (1 << i));
3271 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3272 pin, NULL);
3273 if (status)
3274 break;
3275 }
3276
3277 return status;
3278}
3279
3280/**
3281 * ice_read_pca9575_reg_e810t
3282 * @hw: pointer to the hw struct
3283 * @offset: GPIO controller register offset
3284 * @data: pointer to data to be read from the GPIO controller
3285 *
3286 * Read the register from the GPIO controller
3287 */
3288int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3289{
3290 struct ice_aqc_link_topo_addr link_topo;
3291 __le16 addr;
3292 u16 handle;
3293 int err;
3294
3295 memset(&link_topo, 0, sizeof(link_topo));
3296
3297 err = ice_get_pca9575_handle(hw, &handle);
3298 if (err)
3299 return err;
3300
3301 link_topo.handle = cpu_to_le16(handle);
3302 link_topo.topo_params.node_type_ctx =
3303 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3304 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3305
3306 addr = cpu_to_le16((u16)offset);
3307
3308 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3309}
3310
3311/**
3312 * ice_is_pca9575_present
3313 * @hw: pointer to the hw struct
3314 *
3315 * Check if the SW IO expander is present in the netlist
3316 */
3317bool ice_is_pca9575_present(struct ice_hw *hw)
3318{
3319 u16 handle = 0;
3320 int status;
3321
3322 if (!ice_is_e810t(hw))
3323 return false;
3324
3325 status = ice_get_pca9575_handle(hw, &handle);
3326
3327 return !status && handle;
3328}
3329
3330/**
3331 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
3332 * @hw: pointer to the HW struct
3333 */
3334void ice_ptp_reset_ts_memory(struct ice_hw *hw)
3335{
3336 if (ice_is_e810(hw))
3337 return;
3338
3339 ice_ptp_reset_ts_memory_e822(hw);
3340}
3341
3342/**
3343 * ice_ptp_init_phc - Initialize PTP hardware clock
3344 * @hw: pointer to the HW struct
3345 *
3346 * Perform the steps required to initialize the PTP hardware clock.
3347 */
3348int ice_ptp_init_phc(struct ice_hw *hw)
3349{
3350 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3351
3352 /* Enable source clocks */
3353 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3354
3355 /* Clear event err indications for auxiliary pins */
3356 (void)rd32(hw, GLTSYN_STAT(src_idx));
3357
3358 if (ice_is_e810(hw))
3359 return ice_ptp_init_phc_e810(hw);
3360 else
3361 return ice_ptp_init_phc_e822(hw);
3362}
3363
3364/**
3365 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
3366 * @hw: pointer to the HW struct
3367 * @block: the timestamp block to check
3368 * @tstamp_ready: storage for the PHY Tx memory status information
3369 *
3370 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
3371 * which indicates which timestamps in the block may be captured. A set bit
3372 * means the timestamp can be read. An unset bit means the timestamp is not
3373 * ready and software should avoid reading the register.
3374 */
3375int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
3376{
3377 if (ice_is_e810(hw))
3378 return ice_get_phy_tx_tstamp_ready_e810(hw, block,
3379 tstamp_ready);
3380 else
3381 return ice_get_phy_tx_tstamp_ready_e822(hw, block,
3382 tstamp_ready);
3383}