1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2/* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7#include <linux/types.h>
8#include "qed.h"
9#include "qed_dev_api.h"
10#include "qed_hw.h"
11#include "qed_l2.h"
12#include "qed_mcp.h"
13#include "qed_ptp.h"
14#include "qed_reg_addr.h"
15
16/* 16 nano second time quantas to wait before making a Drift adjustment */
17#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
18/* Nano seconds to add/subtract when making a Drift adjustment */
19#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
20/* Add/subtract the Adjustment_Value when making a Drift adjustment */
21#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
22#define QED_TIMESTAMP_MASK BIT(16)
23/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
24#define QED_PTP_UCAST_PARAM_MASK 0x70F
25
26static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
27{
28 switch (MFW_PORT(p_hwfn)) {
29 case 0:
30 return QED_RESC_LOCK_PTP_PORT0;
31 case 1:
32 return QED_RESC_LOCK_PTP_PORT1;
33 case 2:
34 return QED_RESC_LOCK_PTP_PORT2;
35 case 3:
36 return QED_RESC_LOCK_PTP_PORT3;
37 default:
38 return QED_RESC_LOCK_RESC_INVALID;
39 }
40}
41
42static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
43{
44 struct qed_resc_lock_params params;
45 enum qed_resc_lock resource;
46 int rc;
47
48 resource = qed_ptcdev_to_resc(p_hwfn);
49 if (resource == QED_RESC_LOCK_RESC_INVALID)
50 return -EINVAL;
51
52 qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true);
53
54 rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms);
55 if (rc && rc != -EINVAL) {
56 return rc;
57 } else if (rc == -EINVAL) {
58 /* MFW doesn't support resource locking, first PF on the port
59 * has lock ownership.
60 */
61 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
62 return 0;
63
64 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
65 return -EBUSY;
66 } else if (!params.b_granted) {
67 DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
68 return -EBUSY;
69 }
70
71 return 0;
72}
73
74static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
75{
76 struct qed_resc_unlock_params params;
77 enum qed_resc_lock resource;
78 int rc;
79
80 resource = qed_ptcdev_to_resc(p_hwfn);
81 if (resource == QED_RESC_LOCK_RESC_INVALID)
82 return -EINVAL;
83
84 qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true);
85
86 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms);
87 if (rc == -EINVAL) {
88 /* MFW doesn't support locking, first PF has lock ownership */
89 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
90 rc = 0;
91 } else {
92 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
93 return -EINVAL;
94 }
95 } else if (rc) {
96 DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
97 }
98
99 return rc;
100}
101
102/* Read Rx timestamp */
103static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
104{
105 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
106 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
107 u32 val;
108
109 *timestamp = 0;
110 val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
111 if (!(val & QED_TIMESTAMP_MASK)) {
112 DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
113 return -EINVAL;
114 }
115
116 val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
117 *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
118 *timestamp <<= 32;
119 *timestamp |= val;
120
121 /* Reset timestamp register to allow new timestamp */
122 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
123 QED_TIMESTAMP_MASK);
124
125 return 0;
126}
127
128/* Read Tx timestamp */
129static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
130{
131 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
132 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
133 u32 val;
134
135 *timestamp = 0;
136 val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
137 if (!(val & QED_TIMESTAMP_MASK)) {
138 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
139 "Invalid Tx timestamp, buf_seqid = %08x\n", val);
140 return -EINVAL;
141 }
142
143 val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
144 *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
145 *timestamp <<= 32;
146 *timestamp |= val;
147
148 /* Reset timestamp register to allow new timestamp */
149 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
150
151 return 0;
152}
153
154/* Read Phy Hardware Clock */
155static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
156{
157 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
158 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
159 u32 temp = 0;
160
161 temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
162 *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
163 *phc_cycles <<= 32;
164 *phc_cycles |= temp;
165
166 return 0;
167}
168
/* Filter PTP protocol packets that need to be timestamped.
 *
 * Translates the requested Rx filter into a detection-enable bitmap
 * (enable_cfg) plus an LLH rule mask, programs both for Rx, and mirrors
 * the setting on Tx unless Tx timestamping is off.
 *
 * NOTE(review): enable_cfg/rule_mask values are raw NIG register
 * encodings; their bit meanings come from the register spec and cannot
 * be derived from this file.
 */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	/* Rx side: restrict detection to unicast PTP params, then program
	 * the selected rule mask and detection-enable bits.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
	       QED_PTP_UCAST_PARAM_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		/* Tx timestamping disabled: mask everything out */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		/* Mirror the Rx configuration on the Tx side */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
		       QED_PTP_UCAST_PARAM_MASK);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}
246
247/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
248 * FW/HW accepts the adjustment value in terms of 3 parameters:
249 * Drift period - adjustment happens once in certain number of nano seconds.
250 * Drift value - time is adjusted by a certain value, for example by 5 ns.
251 * Drift direction - add or subtract the adjustment value.
252 * The routine translates ppb into the adjustment triplet in an optimal manner.
253 */
254static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
255{
256 s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
257 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
258 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
259 u32 drift_ctr_cfg = 0, drift_state;
260 int drift_dir = 1;
261
262 if (ppb < 0) {
263 ppb = -ppb;
264 drift_dir = 0;
265 }
266
267 if (ppb > 1) {
268 s64 best_dif = ppb, best_approx_dev = 1;
269
270 /* Adjustment value is up to +/-7ns, find an optimal value in
271 * this range.
272 */
273 for (val = 7; val > 0; val--) {
274 period = div_s64(val * 1000000000, ppb);
275 period -= 8;
276 period >>= 4;
277 if (period < 1)
278 period = 1;
279 if (period > 0xFFFFFFE)
280 period = 0xFFFFFFE;
281
282 /* Check both rounding ends for approximate error */
283 approx_dev = period * 16 + 8;
284 dif = ppb * approx_dev - val * 1000000000;
285 dif2 = dif + 16 * ppb;
286
287 if (dif < 0)
288 dif = -dif;
289 if (dif2 < 0)
290 dif2 = -dif2;
291
292 /* Determine which end gives better approximation */
293 if (dif * (approx_dev + 16) > dif2 * approx_dev) {
294 period++;
295 approx_dev += 16;
296 dif = dif2;
297 }
298
299 /* Track best approximation found so far */
300 if (best_dif * approx_dev > dif * best_approx_dev) {
301 best_dif = dif;
302 best_val = val;
303 best_period = period;
304 best_approx_dev = approx_dev;
305 }
306 }
307 } else if (ppb == 1) {
308 /* This is a special case as its the only value which wouldn't
309 * fit in a s64 variable. In order to prevent castings simple
310 * handle it seperately.
311 */
312 best_val = 4;
313 best_period = 0xee6b27f;
314 } else {
315 best_val = 0;
316 best_period = 0xFFFFFFF;
317 }
318
319 drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
320 (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
321 (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
322
323 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
324
325 drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
326 if (drift_state & 1) {
327 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
328 drift_ctr_cfg);
329 } else {
330 DP_INFO(p_hwfn, "Drift counter is not reset\n");
331 return -EINVAL;
332 }
333
334 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
335
336 return 0;
337}
338
/* Bring up PTP on this PF: acquire a PTT window and the per-port PTP
 * resource lock, reset detection rules, enable timestamping and zero
 * the free-running counter. Register write order is hardware-mandated.
 */
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	/* The PTT window is kept for the lifetime of the PTP session and
	 * released in qed_ptp_hw_disable().
	 */
	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	/* Zero the counter while paused so it restarts from a known value */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}
400
/* Tear down PTP on this PF: release the port lock, reset detection
 * rules, disable timestamping and return the PTT window taken in
 * qed_ptp_hw_enable().
 */
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}
424
/* Callback table wiring the PTP primitives above into the qed Ethernet
 * PTP API (struct qed_eth_ptp_ops).
 */
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include "qed.h"
34#include "qed_dev_api.h"
35#include "qed_hw.h"
36#include "qed_l2.h"
37#include "qed_mcp.h"
38#include "qed_reg_addr.h"
39
40/* 16 nano second time quantas to wait before making a Drift adjustment */
41#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
42/* Nano seconds to add/subtract when making a Drift adjustment */
43#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
44/* Add/subtract the Adjustment_Value when making a Drift adjustment */
45#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
46#define QED_TIMESTAMP_MASK BIT(16)
47
/* Map this device's port id to the per-port PTP resource lock.
 * NOTE(review): this is the older duplicated revision of the file
 * appended after the SPDX copy above; it keys on
 * qed_device_get_port_id() where the newer copy uses MFW_PORT().
 */
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (qed_device_get_port_id(p_hwfn->cdev)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		/* No PTP lock is defined beyond four ports */
		return QED_RESC_LOCK_RESC_INVALID;
	}
}
63
64static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
65{
66 struct qed_resc_lock_params params;
67 enum qed_resc_lock resource;
68 int rc;
69
70 resource = qed_ptcdev_to_resc(p_hwfn);
71 if (resource == QED_RESC_LOCK_RESC_INVALID)
72 return -EINVAL;
73
74 qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true);
75
76 rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms);
77 if (rc && rc != -EINVAL) {
78 return rc;
79 } else if (rc == -EINVAL) {
80 /* MFW doesn't support resource locking, first PF on the port
81 * has lock ownership.
82 */
83 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
84 return 0;
85
86 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
87 return -EBUSY;
88 } else if (!rc && !params.b_granted) {
89 DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
90 return -EBUSY;
91 }
92
93 return rc;
94}
95
96static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
97{
98 struct qed_resc_unlock_params params;
99 enum qed_resc_lock resource;
100 int rc;
101
102 resource = qed_ptcdev_to_resc(p_hwfn);
103 if (resource == QED_RESC_LOCK_RESC_INVALID)
104 return -EINVAL;
105
106 qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true);
107
108 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms);
109 if (rc == -EINVAL) {
110 /* MFW doesn't support locking, first PF has lock ownership */
111 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
112 rc = 0;
113 } else {
114 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
115 return -EINVAL;
116 }
117 } else if (rc) {
118 DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
119 }
120
121 return rc;
122}
123
/* Read Rx timestamp.
 *
 * Returns 0 with the latched 64-bit timestamp in *timestamp, or
 * -EINVAL when no valid timestamp is pending in the host buffer.
 */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	/* QED_TIMESTAMP_MASK set in the seqid word => a timestamp is latched */
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	/* Assemble the 64-bit value from the MSB/LSB register pair */
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}
149
/* Read Tx timestamp.
 *
 * Returns 0 with the latched 64-bit timestamp in *timestamp, or
 * -EINVAL when no valid timestamp is pending in the Tx buffer.
 */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	/* QED_TIMESTAMP_MASK set in the seqid word => a timestamp is latched */
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	/* Assemble the 64-bit value from the MSB/LSB register pair */
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}
174
175/* Read Phy Hardware Clock */
176static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
177{
178 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
179 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
180 u32 temp = 0;
181
182 temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
183 *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
184 *phc_cycles <<= 32;
185 *phc_cycles |= temp;
186
187 return 0;
188}
189
/* Filter PTP protocol packets that need to be timestamped.
 *
 * Translates the requested Rx filter into a detection-enable bitmap
 * (enable_cfg) plus an LLH rule mask, programs both for Rx, and mirrors
 * the setting on Tx unless Tx timestamping is off. This older revision
 * writes 0 to the PARAM_MASK registers (the newer copy above uses a
 * unicast param mask).
 *
 * NOTE(review): enable_cfg/rule_mask values are raw NIG register
 * encodings; their bit meanings come from the register spec and cannot
 * be derived from this file.
 */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	/* Rx side: program the rule mask and detection-enable bits */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		/* Tx timestamping disabled: mask everything out */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		/* Mirror the Rx configuration on the Tx side */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}
265
266/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
267 * FW/HW accepts the adjustment value in terms of 3 parameters:
268 * Drift period - adjustment happens once in certain number of nano seconds.
269 * Drift value - time is adjusted by a certain value, for example by 5 ns.
270 * Drift direction - add or subtract the adjustment value.
271 * The routine translates ppb into the adjustment triplet in an optimal manner.
272 */
273static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
274{
275 s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
276 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
277 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
278 u32 drift_ctr_cfg = 0, drift_state;
279 int drift_dir = 1;
280
281 if (ppb < 0) {
282 ppb = -ppb;
283 drift_dir = 0;
284 }
285
286 if (ppb > 1) {
287 s64 best_dif = ppb, best_approx_dev = 1;
288
289 /* Adjustment value is up to +/-7ns, find an optimal value in
290 * this range.
291 */
292 for (val = 7; val > 0; val--) {
293 period = div_s64(val * 1000000000, ppb);
294 period -= 8;
295 period >>= 4;
296 if (period < 1)
297 period = 1;
298 if (period > 0xFFFFFFE)
299 period = 0xFFFFFFE;
300
301 /* Check both rounding ends for approximate error */
302 approx_dev = period * 16 + 8;
303 dif = ppb * approx_dev - val * 1000000000;
304 dif2 = dif + 16 * ppb;
305
306 if (dif < 0)
307 dif = -dif;
308 if (dif2 < 0)
309 dif2 = -dif2;
310
311 /* Determine which end gives better approximation */
312 if (dif * (approx_dev + 16) > dif2 * approx_dev) {
313 period++;
314 approx_dev += 16;
315 dif = dif2;
316 }
317
318 /* Track best approximation found so far */
319 if (best_dif * approx_dev > dif * best_approx_dev) {
320 best_dif = dif;
321 best_val = val;
322 best_period = period;
323 best_approx_dev = approx_dev;
324 }
325 }
326 } else if (ppb == 1) {
327 /* This is a special case as its the only value which wouldn't
328 * fit in a s64 variable. In order to prevent castings simple
329 * handle it seperately.
330 */
331 best_val = 4;
332 best_period = 0xee6b27f;
333 } else {
334 best_val = 0;
335 best_period = 0xFFFFFFF;
336 }
337
338 drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
339 (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
340 (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
341
342 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
343
344 drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
345 if (drift_state & 1) {
346 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
347 drift_ctr_cfg);
348 } else {
349 DP_INFO(p_hwfn, "Drift counter is not reset\n");
350 return -EINVAL;
351 }
352
353 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
354
355 return 0;
356}
357
/* Bring up PTP on this PF: acquire a PTT window and the per-port PTP
 * resource lock, reset detection rules, enable timestamping and zero
 * the free-running counter. Register write order is hardware-mandated.
 */
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	/* The PTT window is kept for the lifetime of the PTP session and
	 * released in qed_ptp_hw_disable().
	 */
	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	/* Zero the counter while paused so it restarts from a known value */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}
419
/* Tear down PTP on this PF: release the port lock, reset detection
 * rules, disable timestamping and return the PTT window taken in
 * qed_ptp_hw_enable().
 */
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}
443
/* Callback table wiring the PTP primitives above into the qed Ethernet
 * PTP API (struct qed_eth_ptp_ops).
 */
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};