// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE                0x00002000
#define FEC_T_CTRL_CAPTURE              0x00000800
#define FEC_T_CTRL_RESTART              0x00000200
#define FEC_T_CTRL_PERIOD_RST           0x00000030
#define FEC_T_CTRL_PERIOD_EN            0x00000010
#define FEC_T_CTRL_ENABLE               0x00000001

#define FEC_T_INC_MASK                  0x0000007f
#define FEC_T_INC_OFFSET                0
#define FEC_T_INC_CORR_MASK             0x00007f00
#define FEC_T_INC_CORR_OFFSET           8

#define FEC_T_CTRL_PINPER               0x00000080
#define FEC_T_TF0_MASK                  0x00000001
#define FEC_T_TF0_OFFSET                0
#define FEC_T_TF1_MASK                  0x00000002
#define FEC_T_TF1_OFFSET                1
#define FEC_T_TF2_MASK                  0x00000004
#define FEC_T_TF2_OFFSET                2
#define FEC_T_TF3_MASK                  0x00000008
#define FEC_T_TF3_OFFSET                3
#define FEC_T_TDRE_MASK                 0x00000001
#define FEC_T_TDRE_OFFSET               0
#define FEC_T_TMODE_MASK                0x0000003C
#define FEC_T_TMODE_OFFSET              2
#define FEC_T_TIE_MASK                  0x00000040
#define FEC_T_TIE_OFFSET                6
#define FEC_T_TF_MASK                   0x00000080
#define FEC_T_TF_OFFSET                 7

#define FEC_ATIME_CTRL                  0x400
#define FEC_ATIME                       0x404
#define FEC_ATIME_EVT_OFFSET            0x408
#define FEC_ATIME_EVT_PERIOD            0x40c
#define FEC_ATIME_CORR                  0x410
#define FEC_ATIME_INC                   0x414
#define FEC_TS_TIMESTAMP                0x418

#define FEC_TGSR                        0x604
#define FEC_TCSR(n)                     (0x608 + n * 0x08)
#define FEC_TCCR(n)                     (0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL               3
#define FEC_TMODE_TOGGLE                0x05
#define FEC_HIGH_PULSE                  0x0F

#define FEC_CC_MULT                     (1 << 31)
#define FEC_COUNTER_PERIOD              (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD         NSEC_PER_SEC
#define FEC_CHANNLE_0                   0
#define DEFAULT_PPS_CHANNEL             FEC_CHANNLE_0
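
/* Note (derived from the definitions above): the free-running timer is
 * programmed with 1 ns resolution and a FEC_COUNTER_PERIOD of 1 << 31 ns,
 * so it wraps roughly every 2.1 s. PPS_OUPUT_RELOAD_PERIOD of one second
 * therefore reloads the compare value well before a wrap can occur.
 */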

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
        unsigned long flags;
        u32 val, tempval;
        struct timespec64 ts;
        u64 ns;
        val = 0;

        if (fep->pps_enable == enable)
                return 0;

        fep->pps_channel = DEFAULT_PPS_CHANNEL;
        fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

        spin_lock_irqsave(&fep->tmreg_lock, flags);

        if (enable) {
                /* clear capture or output compare interrupt status if any.
                 */
                writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

                /* It is recommended to double check the TMODE field in the
                 * TCSR register to be cleared before the first compare counter
                 * is written into TCCR register. Just add a double check.
                 */
                val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
                do {
                        val &= ~(FEC_T_TMODE_MASK);
                        writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
                        val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
                } while (val & FEC_T_TMODE_MASK);

                /* Dummy read counter to update the counter */
                timecounter_read(&fep->tc);
                /* We want the first compare event to occur at the next
                 * second boundary. So we need to know the current ptp time
                 * and how many nanoseconds remain before the next second.
                 * The remaining nanoseconds before the next second are
                 * NSEC_PER_SEC - ts.tv_nsec; adding them to the current
                 * timer value gives the next second boundary.
                 */
                tempval = readl(fep->hwp + FEC_ATIME_CTRL);
                tempval |= FEC_T_CTRL_CAPTURE;
                writel(tempval, fep->hwp + FEC_ATIME_CTRL);

                tempval = readl(fep->hwp + FEC_ATIME);
                /* Convert the ptp local counter to 1588 timestamp */
                ns = timecounter_cyc2time(&fep->tc, tempval);
                ts = ns_to_timespec64(ns);

                /* tempval is less than 3 seconds, so val is less than
                 * 4 seconds. No overflow for 32-bit calculation.
                 */
                val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

                /* Need to consider the situation that the current time is
                 * very close to the second boundary, which means
                 * NSEC_PER_SEC - ts.tv_nsec is close to zero (for example
                 * 20 ns). Since the timer is still running while we calculate
                 * the first compare event, the remaining nanoseconds may run
                 * out before the compare counter is calculated and written
                 * into the TCCR register. To avoid this, set the compare
                 * event to the second after next. The timer is 31 bits wide
                 * and wraps after more than 2 seconds, so it is safe to aim
                 * for the second after next.
                 */
                val += NSEC_PER_SEC;
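                /* Worked example (hypothetical values): if the captured
                 * counter reads tempval = 1,500,000,000 and the converted
                 * time has ts.tv_nsec = 999,999,980 (20 ns before the second
                 * boundary), then val = 20 + 1,500,000,000 + NSEC_PER_SEC =
                 * 2,500,000,020; the compare event targets the second after
                 * next instead of racing the 20 ns that remain, and the mask
                 * applied below handles the 31-bit wrap.
                 */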

                /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
                 * ptp counter, which may cause a 32-bit wrap. Since the added
                 * offset is less than 2 seconds, the wrap will not cause a
                 * problem. An offset bigger than fep->cc.mask would be an
                 * error.
                 */
                val &= fep->cc.mask;
                writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

                /* Calculate the second compare event timestamp */
                fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

                /* Enable compare event when overflow */
                val = readl(fep->hwp + FEC_ATIME_CTRL);
                val |= FEC_T_CTRL_PINPER;
                writel(val, fep->hwp + FEC_ATIME_CTRL);

                /* Compare channel setting. */
                val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
                val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
                val &= ~(1 << FEC_T_TDRE_OFFSET);
                val &= ~(FEC_T_TMODE_MASK);
                val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
                writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

                /* Write the second compare event timestamp and calculate
                 * the third timestamp. Refer to the TCCR register details
                 * in the spec.
                 */
                writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
                fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
        } else {
                writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
        }

        fep->pps_enable = enable;
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);

        return 0;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * This function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers.
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
        struct fec_enet_private *fep =
                container_of(cc, struct fec_enet_private, cc);
        u32 tempval;

        tempval = readl(fep->hwp + FEC_ATIME_CTRL);
        tempval |= FEC_T_CTRL_CAPTURE;
        writel(tempval, fep->hwp + FEC_ATIME_CTRL);

        if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
                udelay(1);

        return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        unsigned long flags;
        int inc;

        inc = 1000000000 / fep->cycle_speed;

        /* grab the ptp lock */
        spin_lock_irqsave(&fep->tmreg_lock, flags);

        /* 1ns counter */
        writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

        /* use 31-bit timer counter */
        writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

        writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
               fep->hwp + FEC_ATIME_CTRL);

        memset(&fep->cc, 0, sizeof(fep->cc));
        fep->cc.read = fec_ptp_read;
        fep->cc.mask = CLOCKSOURCE_MASK(31);
        fep->cc.shift = 31;
        fep->cc.mult = FEC_CC_MULT;
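        /* With mult = 2^31 and shift = 31, the cyclecounter conversion
         * ns = (cycles * mult) >> shift is an identity: each count of the
         * hardware timer is treated as exactly 1 ns, matching the 1 ns
         * resolution programmed into FEC_ATIME_INC above.
         */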

        /* reset the ns time counter */
        timecounter_init(&fep->tc, &fep->cc, 0);

        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        unsigned long flags;
        int neg_adj = 0;
        u32 i, tmp;
        u32 corr_inc, corr_period;
        u32 corr_ns;
        u64 lhs, rhs;

        struct fec_enet_private *fep =
                container_of(ptp, struct fec_enet_private, ptp_caps);

        if (ppb == 0)
                return 0;

        if (ppb < 0) {
                ppb = -ppb;
                neg_adj = 1;
        }

        /* In theory, corr_inc / corr_period = ppb * fep->ptp_inc / NSEC_PER_SEC.
         * Try to find a corr_inc between 1 and fep->ptp_inc that meets the
         * adjustment requirement.
         */
        lhs = NSEC_PER_SEC;
        rhs = (u64)ppb * (u64)fep->ptp_inc;
        for (i = 1; i <= fep->ptp_inc; i++) {
                if (lhs >= rhs) {
                        corr_inc = i;
                        corr_period = div_u64(lhs, rhs);
                        break;
                }
                lhs += NSEC_PER_SEC;
        }
        /* Not found? Set it to the maximum value: correct at double
         * speed, in every clock step.
         */
        if (i > fep->ptp_inc) {
                corr_inc = fep->ptp_inc;
                corr_period = 1;
        }

        if (neg_adj)
                corr_ns = fep->ptp_inc - corr_inc;
        else
                corr_ns = fep->ptp_inc + corr_inc;
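
        /* Worked example (hypothetical values): with a 125 MHz ptp clock,
         * fep->ptp_inc = 8 ns. For ppb = 100, rhs = 100 * 8 = 800, so the
         * loop exits on its first pass with corr_inc = 1 and
         * corr_period = NSEC_PER_SEC / 800 = 1,250,000. The counter then
         * advances by corr_ns = 9 ns (7 ns for a negative adjustment) once
         * every 1,250,000 ticks, i.e. an extra 1 ns every 10 ms, which is
         * the requested 100 ns/s.
         */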

        spin_lock_irqsave(&fep->tmreg_lock, flags);

        tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
        tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
        writel(tmp, fep->hwp + FEC_ATIME_INC);
        corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
        writel(corr_period, fep->hwp + FEC_ATIME_CORR);
        /* dummy read to update the timer. */
        timecounter_read(&fep->tc);

        spin_unlock_irqrestore(&fep->tmreg_lock, flags);

        return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct fec_enet_private *fep =
                container_of(ptp, struct fec_enet_private, ptp_caps);
        unsigned long flags;

        spin_lock_irqsave(&fep->tmreg_lock, flags);
        timecounter_adjtime(&fep->tc, delta);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);

        return 0;
}
363/**
364 * fec_ptp_gettime
365 * @ptp: the ptp clock structure
366 * @ts: timespec structure to hold the current time value
367 *
368 * read the timecounter and return the correct value on ns,
369 * after converting it into a struct timespec.
370 */
371static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
372{
373 struct fec_enet_private *adapter =
374 container_of(ptp, struct fec_enet_private, ptp_caps);
375 u64 ns;
376 unsigned long flags;
377
378 mutex_lock(&adapter->ptp_clk_mutex);
379 /* Check the ptp clock */
380 if (!adapter->ptp_clk_on) {
381 mutex_unlock(&adapter->ptp_clk_mutex);
382 return -EINVAL;
383 }
384 spin_lock_irqsave(&adapter->tmreg_lock, flags);
385 ns = timecounter_read(&adapter->tc);
386 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
387 mutex_unlock(&adapter->ptp_clk_mutex);
388
389 *ts = ns_to_timespec64(ns);
390
391 return 0;
392}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
                           const struct timespec64 *ts)
{
        struct fec_enet_private *fep =
                container_of(ptp, struct fec_enet_private, ptp_caps);

        u64 ns;
        unsigned long flags;
        u32 counter;

        mutex_lock(&fep->ptp_clk_mutex);
        /* Check the ptp clock */
        if (!fep->ptp_clk_on) {
                mutex_unlock(&fep->ptp_clk_mutex);
                return -EINVAL;
        }

        ns = timespec64_to_ns(ts);
        /* Get the timer value based on timestamp.
         * Update the counter with the masked value.
         */
        counter = ns & fep->cc.mask;
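        /* Only the low 31 bits (fep->cc.mask, CLOCKSOURCE_MASK(31)) can be
         * loaded into the hardware counter; timecounter_init() below keeps
         * the full 64-bit time.
         */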

        spin_lock_irqsave(&fep->tmreg_lock, flags);
        writel(counter, fep->hwp + FEC_ATIME);
        timecounter_init(&fep->tc, &fep->cc, ns);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
        mutex_unlock(&fep->ptp_clk_mutex);
        return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
{
        struct fec_enet_private *fep =
                container_of(ptp, struct fec_enet_private, ptp_caps);
        int ret = 0;

        if (rq->type == PTP_CLK_REQ_PPS) {
                ret = fec_ptp_enable_pps(fep, on);

                return ret;
        }
        return -EOPNOTSUPP;
}

/**
 * fec_ptp_disable_hwts - disable hardware time stamping
 * @ndev: pointer to net_device
 */
void fec_ptp_disable_hwts(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        fep->hwts_tx_en = 0;
        fep->hwts_rx_en = 0;
}

int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        struct hwtstamp_config config;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                fep->hwts_tx_en = 0;
                break;
        case HWTSTAMP_TX_ON:
                fep->hwts_tx_en = 1;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                fep->hwts_rx_en = 0;
                break;

        default:
                fep->hwts_rx_en = 1;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
}

int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct hwtstamp_config config;

        config.flags = 0;
        config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        config.rx_filter = (fep->hwts_rx_en ?
                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
}

/*
 * fec_time_keep - call timecounter_read every second to avoid timer overrun,
 * because ENET only supports a 32-bit counter, which would time out in 4 s
 */
static void fec_time_keep(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
        unsigned long flags;

        mutex_lock(&fep->ptp_clk_mutex);
        if (fep->ptp_clk_on) {
                spin_lock_irqsave(&fep->tmreg_lock, flags);
                timecounter_read(&fep->tc);
                spin_unlock_irqrestore(&fep->tmreg_lock, flags);
        }
        mutex_unlock(&fep->ptp_clk_mutex);

        schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 val;
        u8 channel = fep->pps_channel;
        struct ptp_clock_event event;

        val = readl(fep->hwp + FEC_TCSR(channel));
        if (val & FEC_T_TF_MASK) {
                /* Write the next-next compare value (not the next one,
                 * according to the spec) to the register
                 */
                writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
                do {
                        writel(val, fep->hwp + FEC_TCSR(channel));
                } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

                /* Update the counter */
                fep->next_counter = (fep->next_counter + fep->reload_period) &
                                    fep->cc.mask;

                event.type = PTP_CLOCK_PPS;
                ptp_clock_event(fep->ptp_clock, &event);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
        int irq;
        int ret;

        fep->ptp_caps.owner = THIS_MODULE;
        strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

        fep->ptp_caps.max_adj = 250000000;
        fep->ptp_caps.n_alarm = 0;
        fep->ptp_caps.n_ext_ts = 0;
        fep->ptp_caps.n_per_out = 0;
        fep->ptp_caps.n_pins = 0;
        fep->ptp_caps.pps = 1;
        fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
        fep->ptp_caps.adjtime = fec_ptp_adjtime;
        fep->ptp_caps.gettime64 = fec_ptp_gettime;
        fep->ptp_caps.settime64 = fec_ptp_settime;
        fep->ptp_caps.enable = fec_ptp_enable;

        fep->cycle_speed = clk_get_rate(fep->clk_ptp);
        if (!fep->cycle_speed) {
                fep->cycle_speed = NSEC_PER_SEC;
                dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
        }
        fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
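        /* For example (hypothetical rate): a 125 MHz clk_ptp gives
         * fep->ptp_inc = 8, i.e. the timer advances 8 ns per clock cycle;
         * with the NSEC_PER_SEC fallback above, the increment is 1 ns.
         */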

        spin_lock_init(&fep->tmreg_lock);

        fec_ptp_start_cyclecounter(ndev);

        INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

        irq = platform_get_irq_byname_optional(pdev, "pps");
        if (irq < 0)
                irq = platform_get_irq_optional(pdev, irq_idx);
        /* Failure to get an irq is not fatal,
         * only the PTP_CLOCK_PPS clock events should stop
         */
        if (irq >= 0) {
                ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
                                       0, pdev->name, ndev);
                if (ret < 0)
                        dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
                                 ret);
        }

        fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
        if (IS_ERR(fep->ptp_clock)) {
                fep->ptp_clock = NULL;
                dev_err(&pdev->dev, "ptp_clock_register failed\n");
        }

        schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_stop(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);

        cancel_delayed_work_sync(&fep->time_keep);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
}