// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2021 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_ethtool.h"

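/* Map the tx_type requested via hwtstamp_config to the device's TX
 * timestamping mode.  Returns -ERANGE for tx types the driver does not
 * recognize; whether the device supports the mode is checked separately
 * against ident.lif.eth.hwstamp_tx_modes by the caller.
 */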
static int ionic_hwstamp_tx_mode(int config_tx_type)
{
	switch (config_tx_type) {
	case HWTSTAMP_TX_OFF:
		return IONIC_TXSTAMP_OFF;
	case HWTSTAMP_TX_ON:
		return IONIC_TXSTAMP_ON;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		return IONIC_TXSTAMP_ONESTEP_SYNC;
	case HWTSTAMP_TX_ONESTEP_P2P:
		return IONIC_TXSTAMP_ONESTEP_P2P;
	default:
		return -ERANGE;
	}
}

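/* Map the hwtstamp_config rx_filter to the device's packet classification
 * flags.  A return of 0 means no specific classifier matches; for any filter
 * other than HWTSTAMP_FILTER_NONE the caller then falls back to timestamping
 * all received packets.
 */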
static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
{
	switch (config_rx_filter) {
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		return IONIC_PKT_CLS_PTP1_ALL;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		return IONIC_PKT_CLS_PTP1_SYNC;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		return IONIC_PKT_CLS_PTP2_L4_ALL;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		return IONIC_PKT_CLS_PTP2_L4_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		return IONIC_PKT_CLS_PTP2_L2_ALL;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		return IONIC_PKT_CLS_PTP2_L2_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		return IONIC_PKT_CLS_PTP2_ALL;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		return IONIC_PKT_CLS_PTP2_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ;

	case HWTSTAMP_FILTER_NTP_ALL:
		return IONIC_PKT_CLS_NTP_ALL;

	default:
		return 0;
	}
}

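/* Apply a hardware timestamping configuration to the device.  Called with
 * new_ts from the SIOCSHWTSTAMP ioctl path, or with new_ts == NULL to replay
 * the previously saved configuration, primarily for recovery after a
 * firmware reset.
 */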
static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
					   struct hwtstamp_config *new_ts)
{
	struct ionic *ionic = lif->ionic;
	struct hwtstamp_config *config;
	struct hwtstamp_config ts;
	int tx_mode = 0;
	u64 rx_filt = 0;
	int err, err2;
	bool rx_all;
	__le64 mask;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	mutex_lock(&lif->phc->config_lock);

	if (new_ts) {
		config = new_ts;
	} else {
		/* If called with new_ts == NULL, replay the previous request
		 * primarily for recovery after a FW_RESET.
		 * We saved the previous configuration request info, so copy
		 * the previous request for reference, clear the current state
		 * to match the device's reset state, and run with it.
		 */
		config = &ts;
		memcpy(config, &lif->phc->ts_config, sizeof(*config));
		memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config));
		lif->phc->ts_config_tx_mode = 0;
		lif->phc->ts_config_rx_filt = 0;
	}

	tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
	if (tx_mode < 0) {
		err = tx_mode;
		goto err_queues;
	}

	mask = cpu_to_le64(BIT_ULL(tx_mode));
	if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
		err = -ERANGE;
		goto err_queues;
	}

	rx_filt = ionic_hwstamp_rx_filt(config->rx_filter);
	rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt;

	mask = cpu_to_le64(rx_filt);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) {
		rx_filt = 0;
		rx_all = true;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
	}

	dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n",
		config->rx_filter, rx_filt, rx_all);

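	/* Make sure the dedicated timestamping queues exist before the
	 * device is told to start stamping in this mode/filter.
	 */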
	if (tx_mode) {
		err = ionic_lif_create_hwstamp_txq(lif);
		if (err)
			goto err_queues;
	}

	if (rx_filt) {
		err = ionic_lif_create_hwstamp_rxq(lif);
		if (err)
			goto err_queues;
	}

	if (tx_mode != lif->phc->ts_config_tx_mode) {
		err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
		if (err)
			goto err_txmode;
	}

	if (rx_filt != lif->phc->ts_config_rx_filt) {
		err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
		if (err)
			goto err_rxfilt;
	}

	if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
		err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
		if (err)
			goto err_rxall;
	}

	memcpy(&lif->phc->ts_config, config, sizeof(*config));
	lif->phc->ts_config_rx_filt = rx_filt;
	lif->phc->ts_config_tx_mode = tx_mode;

	mutex_unlock(&lif->phc->config_lock);

	return 0;

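	/* Unwind on failure: restore the previously programmed rx filter and
	 * tx mode so the device matches the still-saved configuration.
	 */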
err_rxall:
	if (rx_filt != lif->phc->ts_config_rx_filt) {
		rx_filt = lif->phc->ts_config_rx_filt;
		err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
		if (err2)
			dev_err(ionic->dev,
				"Failed to revert rx timestamp filter: %d\n", err2);
	}
err_rxfilt:
	if (tx_mode != lif->phc->ts_config_tx_mode) {
		tx_mode = lif->phc->ts_config_tx_mode;
		err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
		if (err2)
			dev_err(ionic->dev,
				"Failed to revert tx timestamp mode: %d\n", err2);
	}
err_txmode:
	/* special queues remain allocated, just unused */
err_queues:
	mutex_unlock(&lif->phc->config_lock);
	return err;
}

int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ionic_lif_hwstamp_set_ts_config(lif, &config);
	if (err) {
		netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
		return err;
	}

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
	int err;

	if (!lif->phc || !lif->phc->ptp)
		return;

	err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
	if (err)
		netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
}

int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	mutex_lock(&lif->phc->config_lock);
	memcpy(&config, &lif->phc->ts_config, sizeof(config));
	mutex_unlock(&lif->phc->config_lock);

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;
	return 0;
}

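/* Read the free-running 64-bit hardware tick counter, which is exposed as a
 * pair of 32-bit registers.  tick_high is sampled before and after tick_low
 * so a rollover between the two reads can be detected and tick_low re-read.
 * When sts is provided, system timestamps are captured around the tick_low
 * read so gettimex64 can report the system time bracketing the device read.
 */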
static u64 ionic_hwstamp_read(struct ionic *ionic,
			      struct ptp_system_timestamp *sts)
{
	u32 tick_high_before, tick_high, tick_low;

	/* read and discard low part to defeat hw staging of high part */
	(void)ioread32(&ionic->idev.hwstamp_regs->tick_low);

	tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);

	ptp_read_system_prets(sts);
	tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
	ptp_read_system_postts(sts);

	tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high);

	/* If tick_high changed, re-read tick_low once more. Assume tick_high
	 * cannot change again so soon as in the span of re-reading tick_low.
	 */
	if (tick_high != tick_high_before) {
		ptp_read_system_prets(sts);
		tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
		ptp_read_system_postts(sts);
	}

	return (u64)tick_low | ((u64)tick_high << 32);
}

static u64 ionic_cc_read(const struct cyclecounter *cc)
{
	struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
	struct ionic *ionic = phc->lif->ionic;

	return ionic_hwstamp_read(ionic, NULL);
}

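/* Fill and post a LIF_SETPHC admin command carrying the current timecounter
 * state (last tick, nsec, frac) and cycle counter scaling (mult, shift), so
 * that the device works from the same tick-to-nanosecond conversion as the
 * host.
 */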
static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx)
{
	ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work);

	ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC;
	ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index);

	ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last);
	ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec);
	ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac);
	ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult);
	ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift);

	return ionic_adminq_post(phc->lif, ctx);
}

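/* Adjust the PHC frequency.  scaled_ppm is the requested offset in parts per
 * million with a 16-bit fractional part, so the adjusted multiplier works out
 * to init_cc_mult * (1 + scaled_ppm / SCALED_PPM).  It is computed below as a
 * multiply followed by a divide to preserve precision; ionic_lif_alloc_phc
 * sizes init_cc_mult so the intermediate product cannot overflow.
 */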
static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	s64 adj;
	int err;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	/* Adjustment value scaled by 2^16 million */
	adj = (s64)scaled_ppm * phc->init_cc_mult;

	/* Adjustment value to scale */
	adj /= (s64)SCALED_PPM;

	/* Final adjusted multiplier */
	adj += phc->init_cc_mult;

	spin_lock_irqsave(&phc->lock, irqflags);

	/* update the point-in-time basis to now, before adjusting the rate */
	timecounter_read(&phc->tc);
	phc->cc.mult = adj;

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err);
}

static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	spin_lock_irqsave(&phc->lock, irqflags);

	timecounter_adjtime(&phc->tc, delta);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err);
}

static int ionic_phc_settime64(struct ptp_clock_info *info,
			       const struct timespec64 *ts)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;
	u64 ns;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&phc->lock, irqflags);

	timecounter_init(&phc->tc, &phc->cc, ns);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err);
}

static int ionic_phc_gettimex64(struct ptp_clock_info *info,
				struct timespec64 *ts,
				struct ptp_system_timestamp *sts)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic *ionic = phc->lif->ionic;
	unsigned long irqflags;
	u64 tick, ns;

	/* Do not attempt to read device time during upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	spin_lock_irqsave(&phc->lock, irqflags);

	tick = ionic_hwstamp_read(ionic, sts);

	ns = timecounter_cyc2time(&phc->tc, tick);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

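/* Periodic work scheduled through ptp_schedule_worker().  It refreshes the
 * timecounter's point-in-time basis and pushes it to the device often enough
 * that the hardware tick counter cannot wrap undetected between updates.
 */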
static long ionic_phc_aux_work(struct ptp_clock_info *info)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;

	/* Do not update phc during device upgrade, but keep polling to resume
	 * after upgrade. Since we don't update the point in time basis, there
	 * is no expectation that we are maintaining the phc time during the
	 * upgrade. After upgrade, it will need to be readjusted back to the
	 * correct time by the ptp daemon.
	 */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return phc->aux_work_delay;

	spin_lock_irqsave(&phc->lock, irqflags);

	/* update point-in-time basis to now */
	timecounter_read(&phc->tc);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	ionic_adminq_wait(phc->lif, &ctx, err);

	return phc->aux_work_delay;
}

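/* Convert a raw hardware tick value reported by the device into kernel ktime
 * using the shared timecounter, taking phc->lock to keep the conversion
 * consistent with concurrent clock adjustments.
 */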
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick)
{
	unsigned long irqflags;
	u64 ns;

	if (!lif->phc)
		return 0;

	spin_lock_irqsave(&lif->phc->lock, irqflags);
	ns = timecounter_cyc2time(&lif->phc->tc, tick);
	spin_unlock_irqrestore(&lif->phc->lock, irqflags);

	return ns_to_ktime(ns);
}

static const struct ptp_clock_info ionic_ptp_info = {
	.owner = THIS_MODULE,
	.name = "ionic_ptp",
	.adjfine = ionic_phc_adjfine,
	.adjtime = ionic_phc_adjtime,
	.gettimex64 = ionic_phc_gettimex64,
	.settime64 = ionic_phc_settime64,
	.do_aux_work = ionic_phc_aux_work,
};

void ionic_lif_register_phc(struct ionic_lif *lif)
{
	if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP))
		return;

	lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev);

	if (IS_ERR(lif->phc->ptp)) {
		dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n",
			 PTR_ERR(lif->phc->ptp));

		lif->phc->ptp = NULL;
	}

	if (lif->phc->ptp)
		ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay);
}

void ionic_lif_unregister_phc(struct ionic_lif *lif)
{
	if (!lif->phc || !lif->phc->ptp)
		return;

	ptp_clock_unregister(lif->phc->ptp);

	lif->phc->ptp = NULL;
}

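/* Allocate and initialize the PHC state at probe time.  The cycle counter's
 * mask, multiplier, and shift come from the device identity data; the mask
 * and update period are then sized so the tick counter cannot wrap between
 * periodic updates, and the multiplier is scaled up for finer adjfine steps.
 */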
void ionic_lif_alloc_phc(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_phc *phc;
	u64 delay, diff, mult;
	u64 frac = 0;
	u64 features;
	u32 shift;

	if (!ionic->idev.hwstamp_regs)
		return;

	features = le64_to_cpu(ionic->ident.lif.eth.config.features);
	if (!(features & IONIC_ETH_HW_TIMESTAMP))
		return;

	phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL);
	if (!phc)
		return;

	phc->lif = lif;

	phc->cc.read = ionic_cc_read;
	phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask);
	phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult);
	phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift);

	if (!phc->cc.mult) {
		dev_err(lif->ionic->dev,
			"Invalid device PHC mask multiplier %u, disabling HW timestamp support\n",
			phc->cc.mult);
		devm_kfree(lif->ionic->dev, phc);
		lif->phc = NULL;
		return;
	}

	dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n",
		phc->cc.mask, phc->cc.mult, phc->cc.shift);

	spin_lock_init(&phc->lock);
	mutex_init(&phc->config_lock);

	/* max ticks is limited by the multiplier, or by the update period. */
	if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) {
		/* max ticks that do not overflow when multiplied by max
		 * adjusted multiplier (twice the initial multiplier)
		 */
		diff = U64_MAX / phc->cc.mult / 2;
	} else {
		/* approx ticks at four times the update period */
		diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2);
		diff = DIV_ROUND_UP(diff, phc->cc.mult);
	}

	/* transform to bitmask */
	diff |= diff >> 1;
	diff |= diff >> 2;
	diff |= diff >> 4;
	diff |= diff >> 8;
	diff |= diff >> 16;
	diff |= diff >> 32;

	/* constrain to the hardware bitmask, and use this as the bitmask */
	diff &= phc->cc.mask;
	phc->cc.mask = diff;

	/* the wrap period is now defined by diff (or phc->cc.mask)
	 *
	 * we will update the time basis at about 1/4 the wrap period, so
	 * should not see a difference of more than +/- diff/4.
	 *
	 * this is sufficient to stay within +/- diff/2, as required by
	 * timecounter_cyc2time to detect an old time stamp.
	 *
	 * adjust the initial multiplier, being careful to avoid overflow:
	 * - do not overflow 63 bits: init_cc_mult * SCALED_PPM
	 * - do not overflow 64 bits: max_mult * (diff / 2)
	 *
	 * we want to increase the initial multiplier as much as possible, to
	 * allow for more precise adjustment in ionic_phc_adjfine.
	 *
	 * only adjust the multiplier if we can double it or more.
	 */
	mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM);
	shift = mult / phc->cc.mult;
	if (shift >= 2) {
		/* initial multiplier will be 2^n of hardware cc.mult */
		shift = fls(shift);
		/* increase cc.mult and cc.shift by the same 2^n and n. */
		phc->cc.mult <<= shift;
		phc->cc.shift += shift;
	}

	dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n",
		phc->cc.mask, phc->cc.mult, phc->cc.shift);

	/* frequency adjustments are relative to the initial multiplier */
	phc->init_cc_mult = phc->cc.mult;

	timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns());

	/* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */
	delay = min_t(u64, IONIC_PHC_UPDATE_NS,
		      cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac));
	dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC);

	phc->aux_work_delay = nsecs_to_jiffies(delay);

	phc->ptp_info = ionic_ptp_info;

	/* We allow the multiplier to be adjusted by up to +/- 1 part per 1,
	 * here expressed as NORMAL_PPB (1 billion parts per billion).
	 */
	phc->ptp_info.max_adj = NORMAL_PPB;

	lif->phc = phc;
}

void ionic_lif_free_phc(struct ionic_lif *lif)
{
	if (!lif->phc)
		return;

	mutex_destroy(&lif->phc->config_lock);

	devm_kfree(lif->ionic->dev, lif->phc);
	lif->phc = NULL;
}