1// SPDX-License-Identifier: GPL-2.0
2/* Microchip KSZ PTP Implementation
3 *
4 * Copyright (C) 2020 ARRI Lighting
5 * Copyright (C) 2022 Microchip Technology Inc.
6 */
7
8#include <linux/dsa/ksz_common.h>
9#include <linux/irq.h>
10#include <linux/irqdomain.h>
11#include <linux/kernel.h>
12#include <linux/ptp_classify.h>
13#include <linux/ptp_clock_kernel.h>
14
15#include "ksz_common.h"
16#include "ksz_ptp.h"
17#include "ksz_ptp_reg.h"
18
19#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
20#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
21#define work_to_xmit_work(w) \
22 container_of((w), struct ksz_deferred_xmit_work, work)
23
24/* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
25 * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999
26 */
27#define KSZ_MAX_DRIFT_CORR 6249999
28#define KSZ_MAX_PULSE_WIDTH 125000000LL
29
30#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
31#define KSZ_PTP_SUBNS_BITS 32
32
33#define KSZ_PTP_INT_START 13
34
/* Route the trigger output unit (TOU) signals to the GPIO/LED pins.
 *
 * Only the LAN937x family exposes the TOU on external pins; on other
 * parts this is a no-op.  Returns 0 on success or a negative error code
 * from the underlying register access.
 */
static int ksz_ptp_tou_gpio(struct ksz_device *dev)
{
	int ret;

	if (!is_lan937x(dev))
		return 0;

	/* Configure the PTP GPIO as an output */
	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
			GPIO_OUT);
	if (ret)
		return ret;

	/* Take both LED pins away from their normal LED function */
	ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
			LED_OVR_1 | LED_OVR_2);
	if (ret)
		return ret;

	/* Drive the LED pins from the PTP GPIO signals instead */
	return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
}
56
57static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
58{
59 u32 data;
60 int ret;
61
62 /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
63 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
64
65 data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
66 ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
67 if (ret)
68 return ret;
69
70 data = FIELD_PREP(TRIG_INT_M, BIT(unit));
71 ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
72 if (ret)
73 return ret;
74
75 /* Clear reset and set GPIO direction */
76 return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
77 0);
78}
79
80static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
81{
82 u32 data;
83
84 if (pulse_ns & 0x3)
85 return -EINVAL;
86
87 data = (pulse_ns / 8);
88 if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
89 return -ERANGE;
90
91 return 0;
92}
93
94static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
95 struct timespec64 const *ts)
96{
97 int ret;
98
99 /* Hardware has only 32 bit */
100 if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
101 return -EINVAL;
102
103 ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
104 if (ret)
105 return ret;
106
107 ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
108 if (ret)
109 return ret;
110
111 return 0;
112}
113
114static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
115{
116 u32 data;
117 int ret;
118
119 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
120 if (ret)
121 return ret;
122
123 /* Check error flag:
124 * - the ACTIVE flag is NOT cleared an error!
125 */
126 ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
127 if (ret)
128 return ret;
129
130 if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
131 dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
132 unit);
133 ret = -EIO;
134 /* Unit will be reset on next access */
135 return ret;
136 }
137
138 return 0;
139}
140
/* Program one trigger output unit for a periodic output signal.
 *
 * @cycle_width_ns: full period of the signal in nanoseconds
 * @pulse_width_ns: high time of the signal in nanoseconds (8 ns units
 *		    in hardware)
 * @target_time:    absolute PTP time of the first edge
 * @index:	    GPIO pin the unit drives
 *
 * Returns 0 or a negative register-access error.
 */
static int ksz_ptp_configure_perout(struct ksz_device *dev,
				    u32 cycle_width_ns, u32 pulse_width_ns,
				    struct timespec64 const *target_time,
				    u8 index)
{
	u32 data;
	int ret;

	/* Notify on trigger, select the GPIO and a positive periodic pattern */
	data = FIELD_PREP(TRIG_NOTIFY, 1) |
		FIELD_PREP(TRIG_GPO_M, index) |
		FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
	ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
	if (ret)
		return ret;

	/* Set cycle count 0 - Infinite */
	ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
	if (ret)
		return ret;

	/* Hardware counts the pulse width in 8 ns units */
	data = (pulse_width_ns / 8);
	ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_target_time_set(dev, target_time);
	if (ret)
		return ret;

	return 0;
}
176
/* Enable or disable a PTP periodic output (PPS-style) request.
 *
 * Caller must hold ptp_data->lock.  Selects the GPIO/TOU pair, resets
 * the unit and — when @on is set — programs the period, duty cycle and
 * start time, then arms the unit.  The request parameters are cached in
 * ptp_data so the signal can be restarted after settime/adjtime.
 *
 * Returns 0 on success or a negative error code (-EOPNOTSUPP for
 * unsupported flags, -EBUSY when the TOU is in another mode, -EINVAL
 * for bad pin/period, or a register-access error).
 */
static int ksz_ptp_enable_perout(struct ksz_device *dev,
				 struct ptp_perout_request const *request,
				 int on)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	u64 req_pulse_width_ns;
	u64 cycle_width_ns;
	u64 pulse_width_ns;
	int pin = 0;
	u32 data32;
	int ret;

	/* Only the duty-cycle flag is supported */
	if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
	    ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
		return -EBUSY;

	pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
	if (pin < 0)
		return -EINVAL;

	/* Select which GPIO pin and trigger unit subsequent accesses hit */
	data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
		 FIELD_PREP(PTP_TOU_INDEX, request->index);
	ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
			PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_reset(dev, request->index);
	if (ret)
		return ret;

	if (!on) {
		ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
		return 0;
	}

	/* Cache start time and period for later restarts */
	ptp_data->perout_target_time_first.tv_sec = request->start.sec;
	ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;

	ptp_data->perout_period.tv_sec = request->period.sec;
	ptp_data->perout_period.tv_nsec = request->period.nsec;

	cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
	if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
		return -EINVAL;

	if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
		pulse_width_ns = request->on.sec * NSEC_PER_SEC +
			request->on.nsec;
	} else {
		/* Use a duty cycle of 50%. Maximum pulse width supported by the
		 * hardware is a little bit more than 125 ms.
		 */
		req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
				      request->period.nsec) / 2;
		pulse_width_ns = min_t(u64, req_pulse_width_ns,
				       KSZ_MAX_PULSE_WIDTH);
	}

	ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
	if (ret)
		return ret;

	ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
				       &ptp_data->perout_target_time_first,
				       pin);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_gpio(dev);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_start(dev, request->index);
	if (ret)
		return ret;

	ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;

	return 0;
}
261
262static int ksz_ptp_enable_mode(struct ksz_device *dev)
263{
264 struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
265 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
266 struct ksz_port *prt;
267 struct dsa_port *dp;
268 bool tag_en = false;
269
270 dsa_switch_for_each_user_port(dp, dev->ds) {
271 prt = &dev->ports[dp->index];
272 if (prt->hwts_tx_en || prt->hwts_rx_en) {
273 tag_en = true;
274 break;
275 }
276 }
277
278 if (tag_en) {
279 ptp_schedule_worker(ptp_data->clock, 0);
280 } else {
281 ptp_cancel_worker_sync(ptp_data->clock);
282 }
283
284 tagger_data->hwtstamp_set_state(dev->ds, tag_en);
285
286 return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
287 tag_en ? PTP_ENABLE : 0);
288}
289
/* Report the hardware timestamping capabilities queried through the
 * ethtool -T <interface> utility.
 */
int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;

	ptp_data = &dev->ptp_data;

	/* No PHC registered (e.g. PTP support disabled) */
	if (!ptp_data->clock)
		return -ENODEV;

	ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
			      SOF_TIMESTAMPING_RX_HARDWARE |
			      SOF_TIMESTAMPING_RAW_HARDWARE;

	ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);

	/* Two-step TX timestamping is only available on LAN937x */
	if (is_lan937x(dev))
		ts->tx_types |= BIT(HWTSTAMP_TX_ON);

	ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	ts->phc_index = ptp_clock_index(ptp_data->clock);

	return 0;
}
321
322int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
323{
324 struct ksz_device *dev = ds->priv;
325 struct hwtstamp_config *config;
326 struct ksz_port *prt;
327
328 prt = &dev->ports[port];
329 config = &prt->tstamp_config;
330
331 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
332 -EFAULT : 0;
333}
334
/* Apply a hwtstamp_config to @prt: select which PTP message interrupts
 * deliver timestamps, pick one-step vs two-step TX mode, and enable RX
 * filtering.  @config->rx_filter is widened to the closest supported
 * filter.  Returns 0 or a negative error (-EINVAL for flags, -ERANGE
 * for unsupported modes, or a register-access error).
 */
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
				   struct ksz_port *prt,
				   struct hwtstamp_config *config)
{
	int ret;

	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ONESTEP_P2P:
		/* Only Pdelay_Req needs a timestamp; the hardware inserts
		 * the residence time into Pdelay_Resp itself.
		 */
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
		if (ret)
			return ret;

		break;
	case HWTSTAMP_TX_ON:
		/* Two-step mode is only available on LAN937x */
		if (!is_lan937x(dev))
			return -ERANGE;

		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = true;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
		if (ret)
			return ret;

		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		prt->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		prt->hwts_rx_en = true;
		break;
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	return ksz_ptp_enable_mode(dev);
}
406
407int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
408{
409 struct ksz_device *dev = ds->priv;
410 struct hwtstamp_config config;
411 struct ksz_port *prt;
412 int ret;
413
414 prt = &dev->ports[port];
415
416 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
417 return -EFAULT;
418
419 ret = ksz_set_hwtstamp_config(dev, prt, &config);
420 if (ret)
421 return ret;
422
423 memcpy(&prt->tstamp_config, &config, sizeof(config));
424
425 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
426 return -EFAULT;
427
428 return 0;
429}
430
/* Reconstruct a full 64-bit timestamp from a partial hardware stamp.
 *
 * The hardware only provides 2 bits of seconds plus nanoseconds in
 * @tstamp.  The missing upper second bits are taken from the software
 * shadow of the PHC (ptp_data->clock_time, kept fresh by the aux
 * worker), and the result is snapped to the nearest 4-second window
 * around the shadow time.
 */
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
	struct timespec64 ptp_clock_time;
	struct ksz_ptp_data *ptp_data;
	struct timespec64 diff;
	struct timespec64 ts;

	ptp_data = &dev->ptp_data;
	ts = ktime_to_timespec64(tstamp);

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_clock_time = ptp_data->clock_time;
	spin_unlock_bh(&ptp_data->clock_lock);

	/* calculate full time from partial time stamp */
	ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;

	/* find nearest possible point in time */
	diff = timespec64_sub(ts, ptp_clock_time);
	if (diff.tv_sec > 2)
		ts.tv_sec -= 4;
	else if (diff.tv_sec < -2)
		ts.tv_sec += 4;

	return timespec64_to_ktime(ts);
}
457
/* DSA .port_rxtstamp: attach the RX hardware timestamp to @skb.
 *
 * The partial stamp extracted by the tagger (KSZ_SKB_CB) is expanded to
 * a full timestamp.  In one-step P2P mode the partial stamp is also
 * subtracted from the correction field of received Pdelay_Req messages
 * so the hardware's later egress-time insertion yields the correct
 * turnaround time.  Always returns false: the skb is never deferred.
 */
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
		       unsigned int type)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct ksz_device *dev = ds->priv;
	struct ptp_header *ptp_hdr;
	struct ksz_port *prt;
	u8 ptp_msg_type;
	ktime_t tstamp;
	s64 correction;

	prt = &dev->ports[port];

	tstamp = KSZ_SKB_CB(skb)->tstamp;
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);

	if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
		goto out;

	ptp_hdr = ptp_parse_header(skb, type);
	if (!ptp_hdr)
		goto out;

	ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
	if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
		goto out;

	/* Only subtract the partial time stamp from the correction field. When
	 * the hardware adds the egress time stamp to the correction field of
	 * the PDelay_Resp message on tx, also only the partial time stamp will
	 * be added.
	 */
	correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
	/* Correction field is in units of 2^-16 ns */
	correction -= ktime_to_ns(tstamp) << 16;

	ptp_header_update_correction(skb, type, ptp_hdr, correction);

out:
	return false;
}
499
/* DSA .port_txtstamp: decide per TX skb whether a hardware timestamp is
 * needed and prepare for it.
 *
 * In one-step P2P mode Sync is stamped by hardware (nothing to do) and
 * Pdelay_Resp only needs its correction field updated by the tagger.
 * For all other timestamped messages an skb clone is stashed in
 * KSZ_SKB_CB so the deferred-xmit path can complete the timestamp.
 */
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
	struct ksz_device *dev = ds->priv;
	struct ptp_header *hdr;
	struct sk_buff *clone;
	struct ksz_port *prt;
	unsigned int type;
	u8 ptp_msg_type;

	prt = &dev->ports[port];

	if (!prt->hwts_tx_en)
		return;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	ptp_msg_type = ptp_get_msgtype(hdr, type);

	switch (ptp_msg_type) {
	case PTP_MSGTYPE_SYNC:
		/* One-step: hardware timestamps Sync on its own */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
			return;
		break;
	case PTP_MSGTYPE_PDELAY_REQ:
		break;
	case PTP_MSGTYPE_PDELAY_RESP:
		/* One-step: tagger patches the correction field instead */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
			KSZ_SKB_CB(skb)->ptp_type = type;
			KSZ_SKB_CB(skb)->update_correction = true;
			return;
		}
		break;

	default:
		return;
	}

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	/* caching the value to be used in tag_ksz.c */
	KSZ_SKB_CB(skb)->clone = clone;
}
550
/* Wait for the TX timestamp interrupt for @skb (a clone) and deliver
 * the timestamp to the socket error queue.  On timeout the clone is
 * simply abandoned (skb_complete_tx_timestamp is not called).
 */
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
				 struct ksz_port *prt, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps hwtstamps = {};
	int ret;

	/* timeout must include DSA conduit to transmit data, tstamp latency,
	 * IRQ latency and time for reading the time stamp.
	 */
	ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
					  msecs_to_jiffies(100));
	if (!ret)
		return;

	/* tstamp_msg was filled in by the PTP message IRQ handler */
	hwtstamps.hwtstamp = prt->tstamp_msg;
	skb_complete_tx_timestamp(skb, &hwtstamps);
}
568
/* Deferred-xmit worker: transmit a timestamp-requiring skb and then
 * block waiting for its hardware TX timestamp.
 *
 * Runs on the tagger's kthread so the 100 ms timestamp wait does not
 * stall the normal TX path.  Frees @work when done.
 */
void ksz_port_deferred_xmit(struct kthread_work *work)
{
	struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct sk_buff *clone, *skb = xmit_work->skb;
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct ksz_device *dev = ds->priv;
	struct ksz_port *prt;

	prt = &dev->ports[xmit_work->dp->index];

	/* Clone was stashed by ksz_port_txtstamp() */
	clone = KSZ_SKB_CB(skb)->clone;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Arm the completion before the frame can hit the wire */
	reinit_completion(&prt->tstamp_msg_comp);

	dsa_enqueue_skb(skb, skb->dev);

	ksz_ptp_txtstamp_skb(dev, prt, clone);

	kfree(xmit_work);
}
591
/* Read the current PHC time into @ts.  Caller must hold ptp_data->lock.
 *
 * Latches the running clock into shadow registers, then reads the
 * sub-nanosecond phase, nanoseconds and seconds parts.  Returns 0 or a
 * negative register-access error.
 */
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
	u32 nanoseconds;
	u32 seconds;
	u8 phase;
	int ret;

	/* Copy current PTP clock into shadow registers and read */
	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
	if (ret)
		return ret;

	ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
	if (ret)
		return ret;

	ts->tv_sec = seconds;
	/* Each phase step is 8 ns */
	ts->tv_nsec = nanoseconds + phase * 8;

	return 0;
}
621
622static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
623{
624 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
625 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
626 int ret;
627
628 mutex_lock(&ptp_data->lock);
629 ret = _ksz_ptp_gettime(dev, ts);
630 mutex_unlock(&ptp_data->lock);
631
632 return ret;
633}
634
/* Re-arm the periodic output after the PHC time was stepped.
 *
 * Caller must hold ptp_data->lock.  Recomputes the next edge from the
 * cached start time and period, keeping a 100 ms guard before the first
 * event, and re-issues the perout request.  Returns 0 or a negative
 * error code.
 */
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	s64 now_ns, first_ns, period_ns, next_ns;
	struct ptp_perout_request request;
	struct timespec64 next;
	struct timespec64 now;
	unsigned int count;
	int ret;

	dev_info(dev->dev, "Restarting periodic output signal\n");

	ret = _ksz_ptp_gettime(dev, &now);
	if (ret)
		return ret;

	now_ns = timespec64_to_ns(&now);
	first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);

	/* Calculate next perout event based on start time and period */
	period_ns = timespec64_to_ns(&ptp_data->perout_period);

	if (first_ns < now_ns) {
		/* Start time is in the past: skip whole elapsed periods */
		count = div_u64(now_ns - first_ns, period_ns);
		next_ns = first_ns + count * period_ns;
	} else {
		next_ns = first_ns;
	}

	/* Ensure 100 ms guard time prior next event */
	while (next_ns < now_ns + 100000000)
		next_ns += period_ns;

	/* Restart periodic output signal */
	next = ns_to_timespec64(next_ns);
	request.start.sec = next.tv_sec;
	request.start.nsec = next.tv_nsec;
	request.period.sec = ptp_data->perout_period.tv_sec;
	request.period.nsec = ptp_data->perout_period.tv_nsec;
	request.index = 0;
	request.flags = 0;

	return ksz_ptp_enable_perout(dev, &request, 1);
}
679
/* PTP .settime64 callback: step the PHC to @ts.
 *
 * Writes the shadow registers, pulses PTP_LOAD_TIME to load them into
 * the running clock, restarts any active periodic output (its absolute
 * start time is now stale) and updates the software shadow time.
 * Returns 0 or a negative register-access error.
 */
static int ksz_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Write to shadow registers and Load PTP clock */
	ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
	if (ret)
		goto unlock;

	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
	if (ret)
		goto unlock;

	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	/* Keep the software shadow of the PHC in sync */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = *ts;
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);

	return ret;
}
727
/* PTP .adjfine callback: adjust the PHC frequency by @scaled_ppm
 * (parts-per-million with a 16-bit fractional part).
 *
 * The hardware adds 40 ns (with a 32-bit sub-nanosecond fraction) per
 * tick; the rate register tweaks that fraction.  A zero adjustment
 * simply disables the rate correction.  Returns 0 or a negative
 * register-access error.
 */
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	u64 base, adj;
	bool negative;
	u32 data32;
	int ret;

	mutex_lock(&ptp_data->lock);

	if (scaled_ppm) {
		/* Nominal increment per tick in sub-nanosecond units */
		base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
		negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);

		data32 = (u32)adj;
		data32 &= PTP_SUBNANOSEC_M;
		/* PTP_RATE_DIR set means the clock is sped up */
		if (!negative)
			data32 |= PTP_RATE_DIR;

		ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
		if (ret)
			goto unlock;

		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
				PTP_CLK_ADJ_ENABLE);
		if (ret)
			goto unlock;
	} else {
		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
766
/* PTP .adjtime callback: offset the PHC by @delta nanoseconds.
 *
 * The hardware applies |sec| and |nsec| as one step whose direction is
 * selected by PTP_STEP_DIR, so the split must keep both parts the same
 * sign (div_s64_rem), unlike ns_to_timespec64() which normalizes nsec
 * to [0, 1s).  Restarts an active periodic output and updates the
 * software shadow time.  Returns 0 or a negative register-access error.
 */
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 delta64 = ns_to_timespec64(delta);
	s32 sec, nsec;
	u16 data16;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* do not use ns_to_timespec64(),
	 * both sec and nsec are subtracted by hw
	 */
	sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
	if (ret)
		goto unlock;

	ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
	if (ret)
		goto unlock;

	data16 |= PTP_STEP_ADJ;

	/* PTP_STEP_DIR -- 0: subtract, 1: add */
	if (delta < 0)
		data16 &= ~PTP_STEP_DIR;
	else
		data16 |= PTP_STEP_DIR;

	ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
	if (ret)
		goto unlock;

	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	/* Keep the software shadow of the PHC in sync */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
827
828static int ksz_ptp_enable(struct ptp_clock_info *ptp,
829 struct ptp_clock_request *req, int on)
830{
831 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
832 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
833 int ret;
834
835 switch (req->type) {
836 case PTP_CLK_REQ_PEROUT:
837 mutex_lock(&ptp_data->lock);
838 ret = ksz_ptp_enable_perout(dev, &req->perout, on);
839 mutex_unlock(&ptp_data->lock);
840 break;
841 default:
842 return -EOPNOTSUPP;
843 }
844
845 return ret;
846}
847
848static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
849 enum ptp_pin_function func, unsigned int chan)
850{
851 int ret = 0;
852
853 switch (func) {
854 case PTP_PF_NONE:
855 case PTP_PF_PEROUT:
856 break;
857 default:
858 ret = -1;
859 break;
860 }
861
862 return ret;
863}
864
/* Function is pointer to the do_aux_work in the ptp_clock capability */
static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 ts;
	int ret;

	/* Periodically refresh the software shadow of the PHC so partial
	 * hardware timestamps can be expanded to full 64-bit times.
	 */
	mutex_lock(&ptp_data->lock);
	ret = _ksz_ptp_gettime(dev, &ts);
	if (ret)
		goto out;

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = ts;
	spin_unlock_bh(&ptp_data->clock_lock);

out:
	mutex_unlock(&ptp_data->lock);

	return HZ;  /* reschedule in 1 second */
}
887
888static int ksz_ptp_start_clock(struct ksz_device *dev)
889{
890 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
891 int ret;
892
893 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
894 if (ret)
895 return ret;
896
897 ptp_data->clock_time.tv_sec = 0;
898 ptp_data->clock_time.tv_nsec = 0;
899
900 return 0;
901}
902
903int ksz_ptp_clock_register(struct dsa_switch *ds)
904{
905 struct ksz_device *dev = ds->priv;
906 struct ksz_ptp_data *ptp_data;
907 int ret;
908 u8 i;
909
910 ptp_data = &dev->ptp_data;
911 mutex_init(&ptp_data->lock);
912 spin_lock_init(&ptp_data->clock_lock);
913
914 ptp_data->caps.owner = THIS_MODULE;
915 snprintf(ptp_data->caps.name, 16, "Microchip Clock");
916 ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
917 ptp_data->caps.gettime64 = ksz_ptp_gettime;
918 ptp_data->caps.settime64 = ksz_ptp_settime;
919 ptp_data->caps.adjfine = ksz_ptp_adjfine;
920 ptp_data->caps.adjtime = ksz_ptp_adjtime;
921 ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
922 ptp_data->caps.enable = ksz_ptp_enable;
923 ptp_data->caps.verify = ksz_ptp_verify_pin;
924 ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
925 ptp_data->caps.n_per_out = 3;
926
927 ret = ksz_ptp_start_clock(dev);
928 if (ret)
929 return ret;
930
931 for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
932 struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
933
934 snprintf(ptp_pin->name,
935 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
936 ptp_pin->index = i;
937 ptp_pin->func = PTP_PF_NONE;
938 }
939
940 ptp_data->caps.pin_config = ptp_data->pin_config;
941
942 /* Currently only P2P mode is supported. When 802_1AS bit is set, it
943 * forwards all PTP packets to host port and none to other ports.
944 */
945 ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
946 PTP_TC_P2P | PTP_802_1AS);
947 if (ret)
948 return ret;
949
950 ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
951 if (IS_ERR_OR_NULL(ptp_data->clock))
952 return PTR_ERR(ptp_data->clock);
953
954 return 0;
955}
956
957void ksz_ptp_clock_unregister(struct dsa_switch *ds)
958{
959 struct ksz_device *dev = ds->priv;
960 struct ksz_ptp_data *ptp_data;
961
962 ptp_data = &dev->ptp_data;
963
964 if (ptp_data->clock)
965 ptp_clock_unregister(ptp_data->clock);
966}
967
/* Threaded handler for a single per-message PTP timestamp interrupt.
 *
 * When timestamping is enabled for this message type, reads the raw
 * partial timestamp from the per-port register, reconstructs the full
 * time and wakes the TX path waiting in ksz_ptp_txtstamp_skb().
 */
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
	struct ksz_ptp_irq *ptpmsg_irq = dev_id;
	struct ksz_device *dev;
	struct ksz_port *port;
	u32 tstamp_raw;
	ktime_t tstamp;
	int ret;

	port = ptpmsg_irq->port;
	dev = port->ksz_dev;

	if (ptpmsg_irq->ts_en) {
		ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
		if (ret)
			return IRQ_NONE;

		tstamp = ksz_decode_tstamp(tstamp_raw);

		port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);

		complete(&port->tstamp_msg_comp);
	}

	return IRQ_HANDLED;
}
994
/* Threaded handler for the port's summary PTP interrupt: demultiplex
 * the per-message status bits to the nested per-message IRQs.
 */
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *ptpirq = dev_id;
	unsigned int nhandled = 0;
	struct ksz_device *dev;
	unsigned int sub_irq;
	u16 data;
	int ret;
	u8 n;

	dev = ptpirq->dev;

	ret = ksz_read16(dev, ptpirq->reg_status, &data);
	if (ret)
		goto out;

	/* Clear the interrupts W1C */
	ret = ksz_write16(dev, ptpirq->reg_status, data);
	if (ret)
		return IRQ_NONE;

	/* Message status bits start at bit KSZ_PTP_INT_START */
	for (n = 0; n < ptpirq->nirqs; ++n) {
		if (data & BIT(n + KSZ_PTP_INT_START)) {
			sub_irq = irq_find_mapping(ptpirq->domain, n);
			handle_nested_irq(sub_irq);
			++nhandled;
		}
	}

out:
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
1027
/* irq_chip .irq_mask: clear the bit in the cached mask; the hardware
 * register is written later in .irq_bus_sync_unlock.
 */
static void ksz_ptp_irq_mask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
}
1034
/* irq_chip .irq_unmask: set the bit in the cached mask; the hardware
 * register is written later in .irq_bus_sync_unlock.
 */
static void ksz_ptp_irq_unmask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
}
1041
/* irq_chip .irq_bus_lock: serialize slow-bus register updates; the
 * matching unlock happens in .irq_bus_sync_unlock.
 */
static void ksz_ptp_irq_bus_lock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	mutex_lock(&kirq->dev->lock_irq);
}
1048
/* irq_chip .irq_bus_sync_unlock: push the cached interrupt mask to the
 * hardware over the slow bus, then release the bus lock taken in
 * .irq_bus_lock.
 */
static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
	struct ksz_device *dev = kirq->dev;
	int ret;

	ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
	if (ret)
		dev_err(dev->dev, "failed to change IRQ mask\n");

	mutex_unlock(&dev->lock_irq);
}
1061
/* irq_chip for the nested per-message PTP interrupts; mask updates are
 * cached and flushed over the slow bus via the bus_lock callbacks.
 */
static const struct irq_chip ksz_ptp_irq_chip = {
	.name			= "ksz-irq",
	.irq_mask		= ksz_ptp_irq_mask,
	.irq_unmask		= ksz_ptp_irq_unmask,
	.irq_bus_lock		= ksz_ptp_irq_bus_lock,
	.irq_bus_sync_unlock	= ksz_ptp_irq_bus_sync_unlock,
};
1069
/* irq_domain .map: wire a freshly created virq to our irq_chip. */
static int ksz_ptp_irq_domain_map(struct irq_domain *d,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, d->host_data);
	irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
	irq_set_noprobe(irq);

	return 0;
}
1079
/* Domain ops for the linear per-port PTP interrupt domain */
static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
	.map	= ksz_ptp_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};
1084
1085static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
1086{
1087 struct ksz_ptp_irq *ptpmsg_irq;
1088
1089 ptpmsg_irq = &port->ptpmsg_irq[n];
1090
1091 free_irq(ptpmsg_irq->num, ptpmsg_irq);
1092 irq_dispose_mapping(ptpmsg_irq->num);
1093}
1094
1095static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
1096{
1097 u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
1098 REG_PTP_PORT_SYNC_TS};
1099 static const char * const name[] = {"pdresp-msg", "xdreq-msg",
1100 "sync-msg"};
1101 const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
1102 struct ksz_ptp_irq *ptpmsg_irq;
1103
1104 ptpmsg_irq = &port->ptpmsg_irq[n];
1105
1106 ptpmsg_irq->port = port;
1107 ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
1108
1109 strscpy(ptpmsg_irq->name, name[n]);
1110
1111 ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
1112 if (ptpmsg_irq->num < 0)
1113 return ptpmsg_irq->num;
1114
1115 return request_threaded_irq(ptpmsg_irq->num, NULL,
1116 ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
1117 ptpmsg_irq->name, ptpmsg_irq);
1118}
1119
/* Create the per-port PTP interrupt hierarchy for port @p: a linear
 * irq_domain with one nested IRQ per timestamped message type, plus the
 * summary handler chained off the port's PORT_SRC_PTP_INT line.
 * Returns 0 or a negative error code, unwinding all partial setup.
 */
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	const struct ksz_dev_ops *ops = dev->dev_ops;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	int irq;
	int ret;

	ptpirq->dev = dev;
	ptpirq->masked = 0;
	ptpirq->nirqs = 3;
	ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
	ptpirq->reg_status = ops->get_port_addr(p,
						REG_PTP_PORT_TX_INT_STATUS__2);
	snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);

	init_completion(&port->tstamp_msg_comp);

	ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
					       &ksz_ptp_irq_domain_ops, ptpirq);
	if (!ptpirq->domain)
		return -ENOMEM;

	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_create_mapping(ptpirq->domain, irq);

	/* The summary PTP IRQ is nested inside the port interrupt domain */
	ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
	if (ptpirq->irq_num < 0) {
		ret = ptpirq->irq_num;
		goto out;
	}

	ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
				   IRQF_ONESHOT, ptpirq->name, ptpirq);
	if (ret)
		goto out;

	for (irq = 0; irq < ptpirq->nirqs; irq++) {
		ret = ksz_ptp_msg_irq_setup(port, irq);
		if (ret)
			goto out_ptp_msg;
	}

	return 0;

out_ptp_msg:
	free_irq(ptpirq->irq_num, ptpirq);
	/* free only the message IRQs that were successfully requested */
	while (irq--)
		free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_dispose_mapping(port->ptpmsg_irq[irq].num);

	irq_domain_remove(ptpirq->domain);

	return ret;
}
1178
/* Tear down the per-port PTP interrupt hierarchy created by
 * ksz_ptp_irq_setup(): message IRQs first, then the summary IRQ, then
 * the domain itself.
 */
void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	u8 n;

	for (n = 0; n < ptpirq->nirqs; n++)
		ksz_ptp_msg_irq_free(port, n);

	free_irq(ptpirq->irq_num, ptpirq);
	irq_dispose_mapping(ptpirq->irq_num);

	irq_domain_remove(ptpirq->domain);
}
1194
1195MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
1196MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
1197MODULE_DESCRIPTION("PTP support for KSZ switch");
1198MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0
2/* Microchip KSZ PTP Implementation
3 *
4 * Copyright (C) 2020 ARRI Lighting
5 * Copyright (C) 2022 Microchip Technology Inc.
6 */
7
8#include <linux/dsa/ksz_common.h>
9#include <linux/irq.h>
10#include <linux/irqdomain.h>
11#include <linux/kernel.h>
12#include <linux/ptp_classify.h>
13#include <linux/ptp_clock_kernel.h>
14
15#include "ksz_common.h"
16#include "ksz_ptp.h"
17#include "ksz_ptp_reg.h"
18
19#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
20#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
21#define work_to_xmit_work(w) \
22 container_of((w), struct ksz_deferred_xmit_work, work)
23
24/* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
25 * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999
26 */
27#define KSZ_MAX_DRIFT_CORR 6249999
28#define KSZ_MAX_PULSE_WIDTH 125000000LL
29
30#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
31#define KSZ_PTP_SUBNS_BITS 32
32
33#define KSZ_PTP_INT_START 13
34
35static int ksz_ptp_tou_gpio(struct ksz_device *dev)
36{
37 int ret;
38
39 if (!is_lan937x(dev))
40 return 0;
41
42 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
43 GPIO_OUT);
44 if (ret)
45 return ret;
46
47 ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
48 LED_OVR_1 | LED_OVR_2);
49 if (ret)
50 return ret;
51
52 return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
53 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
54 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
55}
56
57static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
58{
59 u32 data;
60 int ret;
61
62 /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
63 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
64
65 data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
66 ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
67 if (ret)
68 return ret;
69
70 data = FIELD_PREP(TRIG_INT_M, BIT(unit));
71 ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
72 if (ret)
73 return ret;
74
75 /* Clear reset and set GPIO direction */
76 return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
77 0);
78}
79
80static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
81{
82 u32 data;
83
84 if (pulse_ns & 0x3)
85 return -EINVAL;
86
87 data = (pulse_ns / 8);
88 if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
89 return -ERANGE;
90
91 return 0;
92}
93
94static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
95 struct timespec64 const *ts)
96{
97 int ret;
98
99 /* Hardware has only 32 bit */
100 if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
101 return -EINVAL;
102
103 ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
104 if (ret)
105 return ret;
106
107 ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
108 if (ret)
109 return ret;
110
111 return 0;
112}
113
/* Arm a trigger output unit and verify that it started without error.
 *
 * Returns 0 on success, a register-access error, or -EIO when the
 * hardware reports an error for @unit.
 */
static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
	if (ret)
		return ret;

	/* Check the error flag; note that the ACTIVE flag not being set
	 * is NOT by itself an error.
	 */
	ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
	if (ret)
		return ret;

	if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
		dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
			unit);
		ret = -EIO;
		/* Unit will be reset on next access */
		return ret;
	}

	return 0;
}
140
/* Program one trigger unit for an infinite periodic output.
 *
 * @cycle_width_ns: signal period in ns
 * @pulse_width_ns: high-time of each pulse in ns (multiple of 8 ns)
 * @target_time:    absolute PHC time of the first edge
 * @index:          GPIO pin driven by the unit
 *
 * Returns 0 or a negative register-access error.
 */
static int ksz_ptp_configure_perout(struct ksz_device *dev,
				    u32 cycle_width_ns, u32 pulse_width_ns,
				    struct timespec64 const *target_time,
				    u8 index)
{
	u32 data;
	int ret;

	/* Positive periodic pattern with notification on the chosen pin */
	data = FIELD_PREP(TRIG_NOTIFY, 1) |
		FIELD_PREP(TRIG_GPO_M, index) |
		FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
	ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
	if (ret)
		return ret;

	/* Set cycle count 0 - Infinite */
	ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
	if (ret)
		return ret;

	/* Pulse width register is in units of 8 ns */
	data = (pulse_width_ns / 8);
	ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_target_time_set(dev, target_time);
	if (ret)
		return ret;

	return 0;
}
176
/* Enable or disable the PTP periodic output (PPS-style signal).
 *
 * Validates the request, selects the GPIO/TOU unit, resets it, and when
 * @on is set programs period, pulse width and start time before starting
 * the unit.  Caller must hold ptp_data->lock.  Returns 0 or a negative
 * error code.
 */
static int ksz_ptp_enable_perout(struct ksz_device *dev,
				 struct ptp_perout_request const *request,
				 int on)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	u64 req_pulse_width_ns;
	u64 cycle_width_ns;
	u64 pulse_width_ns;
	int pin = 0;
	u32 data32;
	int ret;

	/* Only the duty-cycle flag is supported */
	if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	/* Refuse while the TOU is used for a different function */
	if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
	    ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
		return -EBUSY;

	pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
	if (pin < 0)
		return -EINVAL;

	/* Select which GPIO pin / trigger unit subsequent register
	 * accesses refer to.
	 */
	data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
		 FIELD_PREP(PTP_TOU_INDEX, request->index);
	ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
			PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_reset(dev, request->index);
	if (ret)
		return ret;

	if (!on) {
		ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
		return 0;
	}

	/* Remember start time and period so the signal can be restarted
	 * after settime/adjtime (see ksz_ptp_restart_perout()).
	 */
	ptp_data->perout_target_time_first.tv_sec = request->start.sec;
	ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;

	ptp_data->perout_period.tv_sec = request->period.sec;
	ptp_data->perout_period.tv_nsec = request->period.nsec;

	cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
	if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
		return -EINVAL;

	if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
		pulse_width_ns = request->on.sec * NSEC_PER_SEC +
			request->on.nsec;
	} else {
		/* Use a duty cycle of 50%. Maximum pulse width supported by the
		 * hardware is a little bit more than 125 ms.
		 */
		req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
				      request->period.nsec) / 2;
		pulse_width_ns = min_t(u64, req_pulse_width_ns,
				       KSZ_MAX_PULSE_WIDTH);
	}

	ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
	if (ret)
		return ret;

	ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
				       &ptp_data->perout_target_time_first,
				       pin);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_gpio(dev);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_start(dev, request->index);
	if (ret)
		return ret;

	ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;

	return 0;
}
261
/* Enable or disable PTP packet handling depending on port configuration.
 *
 * PTP tagging (and the periodic aux worker that keeps the software
 * shadow of the clock fresh) is enabled as soon as any user port has TX
 * or RX timestamping on, and disabled when none has.  The tagger is
 * informed so it can adjust the tail-tag format.
 */
static int ksz_ptp_enable_mode(struct ksz_device *dev)
{
	struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	struct ksz_port *prt;
	struct dsa_port *dp;
	bool tag_en = false;
	int ret;

	/* Any port with timestamping enabled requires PTP mode */
	dsa_switch_for_each_user_port(dp, dev->ds) {
		prt = &dev->ports[dp->index];
		if (prt->hwts_tx_en || prt->hwts_rx_en) {
			tag_en = true;
			break;
		}
	}

	if (tag_en) {
		/* Start the aux worker immediately (delay 0) */
		ret = ptp_schedule_worker(ptp_data->clock, 0);
		if (ret)
			return ret;
	} else {
		ptp_cancel_worker_sync(ptp_data->clock);
	}

	tagger_data->hwtstamp_set_state(dev->ds, tag_en);

	return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
			 tag_en ? PTP_ENABLE : 0);
}
292
/* Report the hardware timestamping capabilities of this switch, as
 * queried through the "ethtool -T <interface>" utility.
 */
296int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
297{
298 struct ksz_device *dev = ds->priv;
299 struct ksz_ptp_data *ptp_data;
300
301 ptp_data = &dev->ptp_data;
302
303 if (!ptp_data->clock)
304 return -ENODEV;
305
306 ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
307 SOF_TIMESTAMPING_RX_HARDWARE |
308 SOF_TIMESTAMPING_RAW_HARDWARE;
309
310 ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
311
312 if (is_lan937x(dev))
313 ts->tx_types |= BIT(HWTSTAMP_TX_ON);
314
315 ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
316 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
317 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
318 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
319
320 ts->phc_index = ptp_clock_index(ptp_data->clock);
321
322 return 0;
323}
324
325int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
326{
327 struct ksz_device *dev = ds->priv;
328 struct hwtstamp_config *config;
329 struct ksz_port *prt;
330
331 prt = &dev->ports[port];
332 config = &prt->tstamp_config;
333
334 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
335 -EFAULT : 0;
336}
337
/* Validate and apply a hardware timestamping configuration for one port.
 *
 * Programs one-step vs. two-step mode, enables the relevant per-message
 * timestamp interrupts, and coerces @config->rx_filter to the nearest
 * supported filter.  Returns 0, -EINVAL for unsupported flags, or
 * -ERANGE for unsupported tx/rx modes.
 */
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
				   struct ksz_port *prt,
				   struct hwtstamp_config *config)
{
	int ret;

	/* No hwtstamp_config flags are supported */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ONESTEP_P2P:
		/* One-step: only Pdelay_Req egress timestamps are needed;
		 * the hardware inserts the others itself.
		 */
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
		if (ret)
			return ret;

		break;
	case HWTSTAMP_TX_ON:
		/* Two-step mode is only implemented on LAN937x */
		if (!is_lan937x(dev))
			return -ERANGE;

		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
		if (ret)
			return ret;

		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		prt->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		prt->hwts_rx_en = true;
		break;
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	return ksz_ptp_enable_mode(dev);
}
409
410int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
411{
412 struct ksz_device *dev = ds->priv;
413 struct hwtstamp_config config;
414 struct ksz_port *prt;
415 int ret;
416
417 prt = &dev->ports[port];
418
419 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
420 return -EFAULT;
421
422 ret = ksz_set_hwtstamp_config(dev, prt, &config);
423 if (ret)
424 return ret;
425
426 memcpy(&prt->tstamp_config, &config, sizeof(config));
427
428 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
429 return -EFAULT;
430
431 return 0;
432}
433
/* Reconstruct a full 64-bit timestamp from a partial hardware timestamp.
 *
 * The hardware timestamp carries only the low 2 bits of the seconds
 * field.  Combine it with the cached full clock time and snap to the
 * nearest point within the 4-second ambiguity window.
 */
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
	struct timespec64 ptp_clock_time;
	struct ksz_ptp_data *ptp_data;
	struct timespec64 diff;
	struct timespec64 ts;

	ptp_data = &dev->ptp_data;
	ts = ktime_to_timespec64(tstamp);

	/* Snapshot of the PHC kept fresh by the aux worker */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_clock_time = ptp_data->clock_time;
	spin_unlock_bh(&ptp_data->clock_lock);

	/* calculate full time from partial time stamp */
	ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;

	/* find nearest possible point in time (+-2 s window, modulo 4) */
	diff = timespec64_sub(ts, ptp_clock_time);
	if (diff.tv_sec > 2)
		ts.tv_sec -= 4;
	else if (diff.tv_sec < -2)
		ts.tv_sec += 4;

	return timespec64_to_ktime(ts);
}
460
/* Attach the RX hardware timestamp to @skb and, in one-step P2P mode,
 * pre-subtract the ingress timestamp from a Pdelay_Req correction field.
 *
 * Always returns false so DSA continues normal delivery of the skb.
 */
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
		       unsigned int type)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct ksz_device *dev = ds->priv;
	struct ptp_header *ptp_hdr;
	struct ksz_port *prt;
	u8 ptp_msg_type;
	ktime_t tstamp;
	s64 correction;

	prt = &dev->ports[port];

	/* Partial timestamp extracted from the tail tag by the tagger */
	tstamp = KSZ_SKB_CB(skb)->tstamp;
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);

	/* Correction-field rewriting is only needed for one-step P2P */
	if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
		goto out;

	ptp_hdr = ptp_parse_header(skb, type);
	if (!ptp_hdr)
		goto out;

	ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
	if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
		goto out;

	/* Only subtract the partial time stamp from the correction field. When
	 * the hardware adds the egress time stamp to the correction field of
	 * the PDelay_Resp message on tx, also only the partial time stamp will
	 * be added.
	 */
	correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
	correction -= ktime_to_ns(tstamp) << 16;

	ptp_header_update_correction(skb, type, ptp_hdr, correction);

out:
	return false;
}
502
/* Decide per outgoing PTP message whether a TX timestamp is needed.
 *
 * For messages that require a two-step egress timestamp, clone the skb;
 * the tagger defers transmission and the clone is completed from the
 * timestamp interrupt.  One-step P2P Pdelay_Resp messages instead get
 * their correction field updated in hardware, so only flags are set.
 */
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
	struct ksz_device *dev = ds->priv;
	struct ptp_header *hdr;
	struct sk_buff *clone;
	struct ksz_port *prt;
	unsigned int type;
	u8 ptp_msg_type;

	prt = &dev->ports[port];

	if (!prt->hwts_tx_en)
		return;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	ptp_msg_type = ptp_get_msgtype(hdr, type);

	switch (ptp_msg_type) {
	case PTP_MSGTYPE_SYNC:
		/* In one-step mode the hardware timestamps Sync itself */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
			return;
		break;
	case PTP_MSGTYPE_PDELAY_REQ:
		break;
	case PTP_MSGTYPE_PDELAY_RESP:
		/* One-step: tagger updates the correction field instead */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
			KSZ_SKB_CB(skb)->ptp_type = type;
			KSZ_SKB_CB(skb)->update_correction = true;
			return;
		}
		break;

	default:
		return;
	}

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	/* caching the value to be used in tag_ksz.c */
	KSZ_SKB_CB(skb)->clone = clone;
}
553
/* Wait for the egress timestamp interrupt and complete the skb clone.
 *
 * NOTE(review): on timeout the clone is returned without a timestamp
 * completion here — presumably freed elsewhere in the stack; verify
 * against skb_complete_tx_timestamp() reference handling.
 */
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
				 struct ksz_port *prt, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps hwtstamps = {};
	int ret;

	/* timeout must include DSA conduit to transmit data, tstamp latency,
	 * IRQ latency and time for reading the time stamp.
	 */
	ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
					  msecs_to_jiffies(100));
	if (!ret)
		return;

	/* tstamp_msg was filled in by ksz_ptp_msg_thread_fn() */
	hwtstamps.hwtstamp = prt->tstamp_msg;
	skb_complete_tx_timestamp(skb, &hwtstamps);
}
571
/* kthread work: transmit a deferred PTP frame and gather its timestamp.
 *
 * Runs in process context so it may sleep while waiting for the egress
 * timestamp interrupt.  Frees the work item when done.
 */
void ksz_port_deferred_xmit(struct kthread_work *work)
{
	struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct sk_buff *clone, *skb = xmit_work->skb;
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct ksz_device *dev = ds->priv;
	struct ksz_port *prt;

	prt = &dev->ports[xmit_work->dp->index];

	/* Clone created earlier in ksz_port_txtstamp() */
	clone = KSZ_SKB_CB(skb)->clone;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Arm the completion before the frame can possibly egress */
	reinit_completion(&prt->tstamp_msg_comp);

	dsa_enqueue_skb(skb, skb->dev);

	ksz_ptp_txtstamp_skb(dev, prt, clone);

	kfree(xmit_work);
}
594
/* Read the PHC time from the hardware shadow registers.
 *
 * Caller must hold ptp_data->lock.  Returns 0 or a register-access
 * error.
 */
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
	u32 nanoseconds;
	u32 seconds;
	u8 phase;
	int ret;

	/* Copy current PTP clock into shadow registers and read */
	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
	if (ret)
		return ret;

	ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
	if (ret)
		return ret;

	ts->tv_sec = seconds;
	/* Each phase step represents 8 ns of sub-register resolution */
	ts->tv_nsec = nanoseconds + phase * 8;

	return 0;
}
624
625static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
626{
627 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
628 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
629 int ret;
630
631 mutex_lock(&ptp_data->lock);
632 ret = _ksz_ptp_gettime(dev, ts);
633 mutex_unlock(&ptp_data->lock);
634
635 return ret;
636}
637
/* Re-arm the periodic output after the PHC time has been stepped.
 *
 * Recomputes the next output edge from the originally requested start
 * time and period, keeping the output phase-aligned with the original
 * request, with a 100 ms guard before the first new edge.
 */
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	s64 now_ns, first_ns, period_ns, next_ns;
	struct ptp_perout_request request;
	struct timespec64 next;
	struct timespec64 now;
	unsigned int count;
	int ret;

	dev_info(dev->dev, "Restarting periodic output signal\n");

	ret = _ksz_ptp_gettime(dev, &now);
	if (ret)
		return ret;

	now_ns = timespec64_to_ns(&now);
	first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);

	/* Calculate next perout event based on start time and period */
	period_ns = timespec64_to_ns(&ptp_data->perout_period);

	if (first_ns < now_ns) {
		/* Advance to the last edge at or before "now" */
		count = div_u64(now_ns - first_ns, period_ns);
		next_ns = first_ns + count * period_ns;
	} else {
		next_ns = first_ns;
	}

	/* Ensure 100 ms guard time prior next event */
	while (next_ns < now_ns + 100000000)
		next_ns += period_ns;

	/* Restart periodic output signal */
	next = ns_to_timespec64(next_ns);
	request.start.sec = next.tv_sec;
	request.start.nsec = next.tv_nsec;
	request.period.sec = ptp_data->perout_period.tv_sec;
	request.period.nsec = ptp_data->perout_period.tv_nsec;
	request.index = 0;
	request.flags = 0;

	return ksz_ptp_enable_perout(dev, &request, 1);
}
682
/* PHC settime64 callback: step the hardware clock to an absolute time.
 *
 * Writes the shadow registers, latches them into the RTC, restarts a
 * running periodic output, and updates the cached software clock time.
 */
static int ksz_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Write to shadow registers and Load PTP clock */
	ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
	if (ret)
		goto unlock;

	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
	if (ret)
		goto unlock;

	/* A running periodic output must be re-phased to the new time */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	/* Keep the software shadow in sync for timestamp reconstruction */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = *ts;
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);

	return ret;
}
730
/* PHC adjfine callback: adjust the clock rate by @scaled_ppm.
 *
 * Converts the scaled-ppm request into the hardware's sub-nanosecond
 * rate register (32 fractional bits per 40 ns increment) and enables
 * rate adjustment; a request of 0 disables it.
 */
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	u64 base, adj;
	bool negative;
	u32 data32;
	int ret;

	mutex_lock(&ptp_data->lock);

	if (scaled_ppm) {
		/* Nominal increment: 40 ns with 32 fractional bits */
		base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
		negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);

		data32 = (u32)adj;
		data32 &= PTP_SUBNANOSEC_M;
		/* PTP_RATE_DIR set means speed the clock up */
		if (!negative)
			data32 |= PTP_RATE_DIR;

		ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
		if (ret)
			goto unlock;

		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
				PTP_CLK_ADJ_ENABLE);
		if (ret)
			goto unlock;
	} else {
		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
769
/* PHC adjtime callback: step the clock by a signed @delta in ns.
 *
 * The hardware adds or subtracts the programmed seconds and nanoseconds
 * as a unit, so the split must keep both components with the same sign
 * (hence div_s64_rem() + abs() rather than ns_to_timespec64()).
 */
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 delta64 = ns_to_timespec64(delta);
	s32 sec, nsec;
	u16 data16;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* do not use ns_to_timespec64(),
	 * both sec and nsec are subtracted by hw
	 */
	sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
	if (ret)
		goto unlock;

	ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
	if (ret)
		goto unlock;

	data16 |= PTP_STEP_ADJ;

	/* PTP_STEP_DIR -- 0: subtract, 1: add */
	if (delta < 0)
		data16 &= ~PTP_STEP_DIR;
	else
		data16 |= PTP_STEP_DIR;

	ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
	if (ret)
		goto unlock;

	/* A running periodic output must be re-phased after a step */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	/* Keep the software shadow in sync (delta64 is ns_to_timespec64
	 * normalized, which is fine for timespec64_add())
	 */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
830
831static int ksz_ptp_enable(struct ptp_clock_info *ptp,
832 struct ptp_clock_request *req, int on)
833{
834 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
835 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
836 int ret;
837
838 switch (req->type) {
839 case PTP_CLK_REQ_PEROUT:
840 mutex_lock(&ptp_data->lock);
841 ret = ksz_ptp_enable_perout(dev, &req->perout, on);
842 mutex_unlock(&ptp_data->lock);
843 break;
844 default:
845 return -EOPNOTSUPP;
846 }
847
848 return ret;
849}
850
851static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
852 enum ptp_pin_function func, unsigned int chan)
853{
854 int ret = 0;
855
856 switch (func) {
857 case PTP_PF_NONE:
858 case PTP_PF_PEROUT:
859 break;
860 default:
861 ret = -1;
862 break;
863 }
864
865 return ret;
866}
867
868/* Function is pointer to the do_aux_work in the ptp_clock capability */
869static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
870{
871 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
872 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
873 struct timespec64 ts;
874 int ret;
875
876 mutex_lock(&ptp_data->lock);
877 ret = _ksz_ptp_gettime(dev, &ts);
878 if (ret)
879 goto out;
880
881 spin_lock_bh(&ptp_data->clock_lock);
882 ptp_data->clock_time = ts;
883 spin_unlock_bh(&ptp_data->clock_lock);
884
885out:
886 mutex_unlock(&ptp_data->lock);
887
888 return HZ; /* reschedule in 1 second */
889}
890
891static int ksz_ptp_start_clock(struct ksz_device *dev)
892{
893 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
894 int ret;
895
896 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
897 if (ret)
898 return ret;
899
900 ptp_data->clock_time.tv_sec = 0;
901 ptp_data->clock_time.tv_nsec = 0;
902
903 return 0;
904}
905
906int ksz_ptp_clock_register(struct dsa_switch *ds)
907{
908 struct ksz_device *dev = ds->priv;
909 struct ksz_ptp_data *ptp_data;
910 int ret;
911 u8 i;
912
913 ptp_data = &dev->ptp_data;
914 mutex_init(&ptp_data->lock);
915 spin_lock_init(&ptp_data->clock_lock);
916
917 ptp_data->caps.owner = THIS_MODULE;
918 snprintf(ptp_data->caps.name, 16, "Microchip Clock");
919 ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
920 ptp_data->caps.gettime64 = ksz_ptp_gettime;
921 ptp_data->caps.settime64 = ksz_ptp_settime;
922 ptp_data->caps.adjfine = ksz_ptp_adjfine;
923 ptp_data->caps.adjtime = ksz_ptp_adjtime;
924 ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
925 ptp_data->caps.enable = ksz_ptp_enable;
926 ptp_data->caps.verify = ksz_ptp_verify_pin;
927 ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
928 ptp_data->caps.n_per_out = 3;
929
930 ret = ksz_ptp_start_clock(dev);
931 if (ret)
932 return ret;
933
934 for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
935 struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
936
937 snprintf(ptp_pin->name,
938 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
939 ptp_pin->index = i;
940 ptp_pin->func = PTP_PF_NONE;
941 }
942
943 ptp_data->caps.pin_config = ptp_data->pin_config;
944
945 /* Currently only P2P mode is supported. When 802_1AS bit is set, it
946 * forwards all PTP packets to host port and none to other ports.
947 */
948 ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
949 PTP_TC_P2P | PTP_802_1AS);
950 if (ret)
951 return ret;
952
953 ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
954 if (IS_ERR_OR_NULL(ptp_data->clock))
955 return PTR_ERR(ptp_data->clock);
956
957 return 0;
958}
959
960void ksz_ptp_clock_unregister(struct dsa_switch *ds)
961{
962 struct ksz_device *dev = ds->priv;
963 struct ksz_ptp_data *ptp_data;
964
965 ptp_data = &dev->ptp_data;
966
967 if (ptp_data->clock)
968 ptp_clock_unregister(ptp_data->clock);
969}
970
/* Threaded handler for one per-message egress timestamp interrupt.
 *
 * Reads the partial egress timestamp, extends it to a full time, and
 * wakes the waiter in ksz_ptp_txtstamp_skb().
 */
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
	struct ksz_ptp_irq *ptpmsg_irq = dev_id;
	struct ksz_device *dev;
	struct ksz_port *port;
	u32 tstamp_raw;
	ktime_t tstamp;
	int ret;

	port = ptpmsg_irq->port;
	dev = port->ksz_dev;

	/* Timestamp reads are only needed when the mode enables them */
	if (ptpmsg_irq->ts_en) {
		ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
		if (ret)
			return IRQ_NONE;

		tstamp = ksz_decode_tstamp(tstamp_raw);

		port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);

		complete(&port->tstamp_msg_comp);
	}

	return IRQ_HANDLED;
}
997
/* Threaded handler for the parent per-port PTP interrupt.
 *
 * Reads and acknowledges the status register, then dispatches each set
 * bit as a nested per-message interrupt.
 */
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *ptpirq = dev_id;
	unsigned int nhandled = 0;
	struct ksz_device *dev;
	unsigned int sub_irq;
	u16 data;
	int ret;
	u8 n;

	dev = ptpirq->dev;

	ret = ksz_read16(dev, ptpirq->reg_status, &data);
	if (ret)
		goto out;

	/* Clear the interrupts W1C */
	ret = ksz_write16(dev, ptpirq->reg_status, data);
	if (ret)
		return IRQ_NONE;

	/* Message interrupt bits start at KSZ_PTP_INT_START in the word */
	for (n = 0; n < ptpirq->nirqs; ++n) {
		if (data & BIT(n + KSZ_PTP_INT_START)) {
			sub_irq = irq_find_mapping(ptpirq->domain, n);
			handle_nested_irq(sub_irq);
			++nhandled;
		}
	}

out:
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
1030
1031static void ksz_ptp_irq_mask(struct irq_data *d)
1032{
1033 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1034
1035 kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
1036}
1037
1038static void ksz_ptp_irq_unmask(struct irq_data *d)
1039{
1040 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1041
1042 kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
1043}
1044
1045static void ksz_ptp_irq_bus_lock(struct irq_data *d)
1046{
1047 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1048
1049 mutex_lock(&kirq->dev->lock_irq);
1050}
1051
1052static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
1053{
1054 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1055 struct ksz_device *dev = kirq->dev;
1056 int ret;
1057
1058 ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
1059 if (ret)
1060 dev_err(dev->dev, "failed to change IRQ mask\n");
1061
1062 mutex_unlock(&dev->lock_irq);
1063}
1064
/* irq_chip for the nested per-message PTP interrupts; mask updates are
 * cached and only written to hardware on bus unlock (slow-bus pattern).
 */
static const struct irq_chip ksz_ptp_irq_chip = {
	.name			= "ksz-irq",
	.irq_mask		= ksz_ptp_irq_mask,
	.irq_unmask		= ksz_ptp_irq_unmask,
	.irq_bus_lock		= ksz_ptp_irq_bus_lock,
	.irq_bus_sync_unlock	= ksz_ptp_irq_bus_sync_unlock,
};
1072
1073static int ksz_ptp_irq_domain_map(struct irq_domain *d,
1074 unsigned int irq, irq_hw_number_t hwirq)
1075{
1076 irq_set_chip_data(irq, d->host_data);
1077 irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
1078 irq_set_noprobe(irq);
1079
1080 return 0;
1081}
1082
/* Domain operations for the per-port PTP message interrupt domain */
static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
	.map = ksz_ptp_irq_domain_map,
	.xlate = irq_domain_xlate_twocell,
};
1087
1088static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
1089{
1090 struct ksz_ptp_irq *ptpmsg_irq;
1091
1092 ptpmsg_irq = &port->ptpmsg_irq[n];
1093
1094 free_irq(ptpmsg_irq->num, ptpmsg_irq);
1095 irq_dispose_mapping(ptpmsg_irq->num);
1096}
1097
1098static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
1099{
1100 u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
1101 REG_PTP_PORT_SYNC_TS};
1102 static const char * const name[] = {"pdresp-msg", "xdreq-msg",
1103 "sync-msg"};
1104 const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
1105 struct ksz_ptp_irq *ptpmsg_irq;
1106
1107 ptpmsg_irq = &port->ptpmsg_irq[n];
1108
1109 ptpmsg_irq->port = port;
1110 ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
1111
1112 snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
1113
1114 ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
1115 if (ptpmsg_irq->num < 0)
1116 return ptpmsg_irq->num;
1117
1118 return request_threaded_irq(ptpmsg_irq->num, NULL,
1119 ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
1120 ptpmsg_irq->name, ptpmsg_irq);
1121}
1122
/* Create the per-port PTP interrupt hierarchy.
 *
 * Builds a small linear irq_domain with one nested interrupt per
 * timestamped message type, wires the parent PTP interrupt from the
 * port interrupt domain, and requests all handlers.  On failure every
 * acquired resource is released in reverse order.
 */
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	const struct ksz_dev_ops *ops = dev->dev_ops;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	int irq;
	int ret;

	ptpirq->dev = dev;
	ptpirq->masked = 0;
	ptpirq->nirqs = 3;
	ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
	ptpirq->reg_status = ops->get_port_addr(p,
						REG_PTP_PORT_TX_INT_STATUS__2);
	snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);

	init_completion(&port->tstamp_msg_comp);

	ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
					       &ksz_ptp_irq_domain_ops, ptpirq);
	if (!ptpirq->domain)
		return -ENOMEM;

	/* Pre-create the mappings so the thread handler can look them up */
	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_create_mapping(ptpirq->domain, irq);

	/* The parent PTP interrupt lives in the port interrupt domain */
	ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
	if (ptpirq->irq_num < 0) {
		ret = ptpirq->irq_num;
		goto out;
	}

	ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
				   IRQF_ONESHOT, ptpirq->name, ptpirq);
	if (ret)
		goto out;

	for (irq = 0; irq < ptpirq->nirqs; irq++) {
		ret = ksz_ptp_msg_irq_setup(port, irq);
		if (ret)
			goto out_ptp_msg;
	}

	return 0;

out_ptp_msg:
	free_irq(ptpirq->irq_num, ptpirq);
	/* Only sub-interrupts requested so far (indices below the failed
	 * one) have handlers to free.
	 */
	while (irq--)
		free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
	/* irq_dispose_mapping() tolerates a zero/unset irq number */
	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_dispose_mapping(port->ptpmsg_irq[irq].num);

	irq_domain_remove(ptpirq->domain);

	return ret;
}
1181
1182void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
1183{
1184 struct ksz_device *dev = ds->priv;
1185 struct ksz_port *port = &dev->ports[p];
1186 struct ksz_irq *ptpirq = &port->ptpirq;
1187 u8 n;
1188
1189 for (n = 0; n < ptpirq->nirqs; n++)
1190 ksz_ptp_msg_irq_free(port, n);
1191
1192 free_irq(ptpirq->irq_num, ptpirq);
1193 irq_dispose_mapping(ptpirq->irq_num);
1194
1195 irq_domain_remove(ptpirq->domain);
1196}
1197
1198MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
1199MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
1200MODULE_DESCRIPTION("PTP support for KSZ switch");
1201MODULE_LICENSE("GPL");