// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include "qede_ptp.h"
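
/* Upper bound (in jiffies) on how long qede_ptp_task() keeps polling the
 * hardware for a Tx timestamp before dropping the pending skb; 2 * HZ is
 * two seconds.
 */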
#define QEDE_PTP_TX_TIMEOUT (2 * HZ)

struct qede_ptp {
	const struct qed_eth_ptp_ops *ops;
	struct ptp_clock_info clock_info;
	struct cyclecounter cc;
	struct timecounter tc;
	struct ptp_clock *clock;
	struct work_struct work;
	unsigned long ptp_tx_start;
	struct qede_dev *edev;
	struct sk_buff *tx_skb;

	/* ptp spinlock is used for protecting the cycle/time counter fields
	 * and, also for serializing the qed PTP API invocations.
	 */
	spinlock_t lock;
	bool hw_ts_ioctl_called;
	u16 tx_type;
	u16 rx_filter;
};

/**
 * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter.
 *
 * @info: The PTP clock info structure.
 * @scaled_ppm: Scaled parts per million adjustment from base.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	struct qede_dev *edev = ptp->edev;
	int rc;

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfine called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}
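
/* For reference: a scaled_ppm value of 65536 corresponds to 1 ppm, which
 * scaled_ppm_to_ppb() converts to 1000 ppb before it is handed to the qed
 * adjfreq operation above.
 */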

static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
		   delta);

	spin_lock_bh(&ptp->lock);
	timecounter_adjtime(&ptp->tc, delta);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	spin_lock_bh(&ptp->lock);
	ns = timecounter_read(&ptp->tc);
	spin_unlock_bh(&ptp->lock);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int qede_ptp_settime(struct ptp_clock_info *info,
			    const struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	ns = timespec64_to_ns(ts);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	spin_lock_bh(&ptp->lock);
	timecounter_init(&ptp->tc, &ptp->cc, ns);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

/* Enable (or disable) ancillary features of the phc subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
					     struct ptp_clock_request *rq,
					     int on)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_ERR(edev, "PHC ancillary features are not supported\n");

	return -ENOTSUPP;
}

static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	bool timedout;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;
	timedout = time_is_before_jiffies(ptp->ptp_tx_start +
					  QEDE_PTP_TX_TIMEOUT);

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		if (unlikely(timedout)) {
			DP_INFO(edev, "Tx timestamp is not recorded\n");
			dev_kfree_skb_any(ptp->tx_skb);
			ptp->tx_skb = NULL;
			clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
					 &edev->flags);
			edev->ptp_skip_txts++;
		} else {
			/* Reschedule to keep checking for a valid TS value */
			schedule_work(&ptp->work);
		}
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ONESTEP_P2P:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}

int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
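
/* Illustrative userspace sketch (not part of this driver): hardware
 * timestamping is typically requested with the SIOCSHWTSTAMP ioctl, which
 * ends up calling qede_ptp_hw_ts() above. For example:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Here "eth0" and sock_fd are placeholders for the qede interface name and
 * an already-open socket file descriptor.
 */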

int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp) {
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;

		return 0;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	return 0;
}
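
/* Note: these capabilities are what userspace sees when querying the
 * interface with, e.g., "ethtool -T <ifname>" (illustrative command;
 * <ifname> stands for the qede netdev name).
 */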

void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}

static int qede_ptp_init(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EINVAL;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_INFO(edev, "PTP HW enable failed\n");
		return rc;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter */
	memset(&ptp->cc, 0, sizeof(ptp->cc));
	ptp->cc.read = qede_ptp_read_cc;
	ptp->cc.mask = CYCLECOUNTER_MASK(64);
	ptp->cc.shift = 0;
	ptp->cc.mult = 1;
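	/* With mult = 1 and shift = 0 the raw counter value is used as-is;
	 * this assumes the qed PHC already counts in nanoseconds, so no
	 * cycles-to-ns scaling is applied here.
	 */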

	timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));

	return 0;
}

int qede_ptp_enable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		DP_INFO(edev, "Failed to allocate struct for PTP\n");
		return -ENOMEM;
	}

	ptp->edev = edev;
	ptp->ops = edev->ops->ptp;
	if (!ptp->ops) {
		DP_INFO(edev, "PTP enable failed\n");
		rc = -EIO;
		goto err1;
	}

	edev->ptp = ptp;

	rc = qede_ptp_init(edev);
	if (rc)
		goto err1;

	qede_ptp_cfg_filters(edev);

	/* Fill the ptp_clock_info struct and register PTP clock */
	ptp->clock_info.owner = THIS_MODULE;
	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
	ptp->clock_info.n_alarm = 0;
	ptp->clock_info.n_ext_ts = 0;
	ptp->clock_info.n_per_out = 0;
	ptp->clock_info.pps = 0;
	ptp->clock_info.adjfine = qede_ptp_adjfine;
	ptp->clock_info.adjtime = qede_ptp_adjtime;
	ptp->clock_info.gettime64 = qede_ptp_gettime;
	ptp->clock_info.settime64 = qede_ptp_settime;
	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		DP_ERR(edev, "PTP clock registration failed\n");
		qede_ptp_disable(edev);
		rc = -EINVAL;
		goto err2;
	}

	return 0;

err1:
	kfree(ptp);
err2:
	edev->ptp = NULL;

	return rc;
}

void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
				  &edev->flags)) {
		DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
		edev->ptp_skip_txts++;
		return;
	}

	if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Tx timestamping was not enabled, this pkt will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else if (unlikely(ptp->tx_skb)) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Device supports a single outstanding pkt to ts, It will not be ts\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* schedule check for Tx timestamp */
		ptp->tx_skb = skb_get(skb);
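		/* The extra reference taken by skb_get() above is released by
		 * dev_kfree_skb_any() in qede_ptp_task() once the HW Tx
		 * timestamp has been read or the 2 * HZ timeout expires.
		 */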
		ptp->ptp_tx_start = jiffies;
		schedule_work(&ptp->work);
	}
}

void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
	if (rc) {
		spin_unlock_bh(&ptp->lock);
		DP_INFO(edev, "Invalid Rx timestamp\n");
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	spin_unlock_bh(&ptp->lock);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}