drivers/net/ethernet/renesas/ravb_ptp.c, v6.8
  1// SPDX-License-Identifier: GPL-2.0+
  2/* PTP 1588 clock using the Renesas Ethernet AVB
  3 *
  4 * Copyright (C) 2013-2015 Renesas Electronics Corporation
  5 * Copyright (C) 2015 Renesas Solutions Corp.
  6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  7 */
  8
  9#include "ravb.h"
 10
 11static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
 12{
 13	struct net_device *ndev = priv->ndev;
 14	int error;
 15
 16	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 17	if (error)
 18		return error;
 19
 20	ravb_modify(ndev, GCCR, request, request);
 21	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 22}
 23
 24/* Caller must hold the lock */
 25static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
 26{
 27	struct net_device *ndev = priv->ndev;
 28	int error;
 29
 30	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
 31	if (error)
 32		return error;
 33
 34	ts->tv_nsec = ravb_read(ndev, GCT0);
 35	ts->tv_sec  = ravb_read(ndev, GCT1) |
 36		((s64)ravb_read(ndev, GCT2) << 32);
 37
 38	return 0;
 39}
 40
 41/* Caller must hold the lock */
 42static int ravb_ptp_time_write(struct ravb_private *priv,
 43				const struct timespec64 *ts)
 44{
 45	struct net_device *ndev = priv->ndev;
 46	int error;
 47	u32 gccr;
 48
 49	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
 50	if (error)
 51		return error;
 52
 53	gccr = ravb_read(ndev, GCCR);
 54	if (gccr & GCCR_LTO)
 55		return -EBUSY;
 56	ravb_write(ndev, ts->tv_nsec, GTO0);
 57	ravb_write(ndev, ts->tv_sec,  GTO1);
 58	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
 59	ravb_write(ndev, gccr | GCCR_LTO, GCCR);
 60
 61	return 0;
 62}
 63
 64/* Caller must hold the lock */
 65static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
 66{
 67	struct net_device *ndev = priv->ndev;
 68	/* When the comparison value (GPTC.PTCV) is in range of
 69	 * [x-1 to x+1] (x is the configured increment value in
 70	 * GTI.TIV), it may happen that a comparison match is
 71	 * not detected when the timer wraps around.
 72	 */
 73	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
 74	u32 gccr;
 75
 76	if (ns < gti_ns_plus_1)
 77		ns = gti_ns_plus_1;
 78	else if (ns > 0 - gti_ns_plus_1)
 79		ns = 0 - gti_ns_plus_1;
 80
 81	gccr = ravb_read(ndev, GCCR);
 82	if (gccr & GCCR_LPTC)
 83		return -EBUSY;
 84	ravb_write(ndev, ns, GPTC);
 85	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
 86
 87	return 0;
 88}
 89
 90/* PTP clock operations */
 91static int ravb_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 92{
 93	struct ravb_private *priv = container_of(ptp, struct ravb_private,
 94						 ptp.info);
 95	struct net_device *ndev = priv->ndev;
 96	unsigned long flags;
  97	u32 addend;
  98	u32 gccr;
  99
 100	addend = (u32)adjust_by_scaled_ppm(priv->ptp.default_addend,
 101					   scaled_ppm);
 102
 103	spin_lock_irqsave(&priv->lock, flags);
104
105	priv->ptp.current_addend = addend;
106
107	gccr = ravb_read(ndev, GCCR);
108	if (gccr & GCCR_LTI) {
109		spin_unlock_irqrestore(&priv->lock, flags);
110		return -EBUSY;
111	}
112	ravb_write(ndev, addend & GTI_TIV, GTI);
113	ravb_write(ndev, gccr | GCCR_LTI, GCCR);
114
115	spin_unlock_irqrestore(&priv->lock, flags);
116
117	return 0;
118}
119
120static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
121{
122	struct ravb_private *priv = container_of(ptp, struct ravb_private,
123						 ptp.info);
124	struct timespec64 ts;
125	unsigned long flags;
126	int error;
127
128	spin_lock_irqsave(&priv->lock, flags);
129	error = ravb_ptp_time_read(priv, &ts);
130	if (!error) {
131		u64 now = ktime_to_ns(timespec64_to_ktime(ts));
132
133		ts = ns_to_timespec64(now + delta);
134		error = ravb_ptp_time_write(priv, &ts);
135	}
136	spin_unlock_irqrestore(&priv->lock, flags);
137
138	return error;
139}
140
141static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
142{
143	struct ravb_private *priv = container_of(ptp, struct ravb_private,
144						 ptp.info);
145	unsigned long flags;
146	int error;
147
148	spin_lock_irqsave(&priv->lock, flags);
149	error = ravb_ptp_time_read(priv, ts);
150	spin_unlock_irqrestore(&priv->lock, flags);
151
152	return error;
153}
154
155static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
156			      const struct timespec64 *ts)
157{
158	struct ravb_private *priv = container_of(ptp, struct ravb_private,
159						 ptp.info);
160	unsigned long flags;
161	int error;
162
163	spin_lock_irqsave(&priv->lock, flags);
164	error = ravb_ptp_time_write(priv, ts);
165	spin_unlock_irqrestore(&priv->lock, flags);
166
167	return error;
168}
169
170static int ravb_ptp_extts(struct ptp_clock_info *ptp,
171			  struct ptp_extts_request *req, int on)
172{
173	struct ravb_private *priv = container_of(ptp, struct ravb_private,
174						 ptp.info);
175	const struct ravb_hw_info *info = priv->info;
176	struct net_device *ndev = priv->ndev;
177	unsigned long flags;
178
179	/* Reject requests with unsupported flags */
180	if (req->flags & ~(PTP_ENABLE_FEATURE |
181			   PTP_RISING_EDGE |
182			   PTP_FALLING_EDGE |
183			   PTP_STRICT_FLAGS))
184		return -EOPNOTSUPP;
185
186	if (req->index)
187		return -EINVAL;
188
189	if (priv->ptp.extts[req->index] == on)
190		return 0;
191	priv->ptp.extts[req->index] = on;
192
193	spin_lock_irqsave(&priv->lock, flags);
194	if (!info->irq_en_dis)
195		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
196	else if (on)
197		ravb_write(ndev, GIE_PTCS, GIE);
198	else
199		ravb_write(ndev, GID_PTCD, GID);
200	spin_unlock_irqrestore(&priv->lock, flags);
201
202	return 0;
203}
204
205static int ravb_ptp_perout(struct ptp_clock_info *ptp,
206			   struct ptp_perout_request *req, int on)
207{
208	struct ravb_private *priv = container_of(ptp, struct ravb_private,
209						 ptp.info);
210	const struct ravb_hw_info *info = priv->info;
211	struct net_device *ndev = priv->ndev;
212	struct ravb_ptp_perout *perout;
213	unsigned long flags;
214	int error = 0;
215
216	/* Reject requests with unsupported flags */
217	if (req->flags)
218		return -EOPNOTSUPP;
219
220	if (req->index)
221		return -EINVAL;
222
223	if (on) {
224		u64 start_ns;
225		u64 period_ns;
226
227		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
228		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
229
230		if (start_ns > U32_MAX) {
231			netdev_warn(ndev,
232				    "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
233			return -ERANGE;
234		}
235
236		if (period_ns > U32_MAX) {
237			netdev_warn(ndev,
238				    "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
239			return -ERANGE;
240		}
241
242		spin_lock_irqsave(&priv->lock, flags);
243
244		perout = &priv->ptp.perout[req->index];
245		perout->target = (u32)start_ns;
246		perout->period = (u32)period_ns;
247		error = ravb_ptp_update_compare(priv, (u32)start_ns);
248		if (!error) {
249			/* Unmask interrupt */
250			if (!info->irq_en_dis)
251				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
252			else
253				ravb_write(ndev, GIE_PTMS0, GIE);
254		}
255	} else	{
256		spin_lock_irqsave(&priv->lock, flags);
257
258		perout = &priv->ptp.perout[req->index];
259		perout->period = 0;
260
261		/* Mask interrupt */
262		if (!info->irq_en_dis)
263			ravb_modify(ndev, GIC, GIC_PTME, 0);
264		else
265			ravb_write(ndev, GID_PTMD0, GID);
266	}
267	spin_unlock_irqrestore(&priv->lock, flags);
268
269	return error;
270}
271
272static int ravb_ptp_enable(struct ptp_clock_info *ptp,
273			   struct ptp_clock_request *req, int on)
274{
275	switch (req->type) {
276	case PTP_CLK_REQ_EXTTS:
277		return ravb_ptp_extts(ptp, &req->extts, on);
278	case PTP_CLK_REQ_PEROUT:
279		return ravb_ptp_perout(ptp, &req->perout, on);
280	default:
281		return -EOPNOTSUPP;
282	}
283}
284
285static const struct ptp_clock_info ravb_ptp_info = {
286	.owner		= THIS_MODULE,
287	.name		= "ravb clock",
288	.max_adj	= 50000000,
289	.n_ext_ts	= N_EXT_TS,
290	.n_per_out	= N_PER_OUT,
291	.adjfine	= ravb_ptp_adjfine,
292	.adjtime	= ravb_ptp_adjtime,
293	.gettime64	= ravb_ptp_gettime64,
294	.settime64	= ravb_ptp_settime64,
295	.enable		= ravb_ptp_enable,
296};
297
298/* Caller must hold the lock */
299void ravb_ptp_interrupt(struct net_device *ndev)
300{
301	struct ravb_private *priv = netdev_priv(ndev);
302	u32 gis = ravb_read(ndev, GIS);
303
304	gis &= ravb_read(ndev, GIC);
305	if (gis & GIS_PTCF) {
306		struct ptp_clock_event event;
307
308		event.type = PTP_CLOCK_EXTTS;
309		event.index = 0;
310		event.timestamp = ravb_read(ndev, GCPT);
311		ptp_clock_event(priv->ptp.clock, &event);
312	}
313	if (gis & GIS_PTMF) {
314		struct ravb_ptp_perout *perout = priv->ptp.perout;
315
316		if (perout->period) {
317			perout->target += perout->period;
318			ravb_ptp_update_compare(priv, perout->target);
319		}
320	}
321
322	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
323}
324
325void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
326{
327	struct ravb_private *priv = netdev_priv(ndev);
328	unsigned long flags;
329
330	priv->ptp.info = ravb_ptp_info;
331
332	priv->ptp.default_addend = ravb_read(ndev, GTI);
333	priv->ptp.current_addend = priv->ptp.default_addend;
334
335	spin_lock_irqsave(&priv->lock, flags);
336	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
337	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
338	spin_unlock_irqrestore(&priv->lock, flags);
339
340	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
341}
342
343void ravb_ptp_stop(struct net_device *ndev)
344{
345	struct ravb_private *priv = netdev_priv(ndev);
346
347	ravb_write(ndev, 0, GIC);
348	ravb_write(ndev, 0, GIS);
349
350	ptp_clock_unregister(priv->ptp.clock);
351}
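
Note on the frequency adjustment above: the addend programmed into GTI appears to be the per-cycle increment in nanoseconds as a fixed-point value with 20 fractional bits (matching the right shift by 20 in ravb_ptp_update_compare()), and scaled_ppm, as elsewhere in the PTP core, is parts per million with a further 16-bit binary fraction; the v5.4 code below still implements the older adjfreq callback, which takes plain parts per billion. The following standalone sketch reproduces that arithmetic for illustration only; it is not the kernel's adjust_by_scaled_ppm() implementation, and the 133 MHz clock and the helper names are assumptions.

#include <stdint.h>
#include <stdio.h>

/* Per-cycle increment in nanoseconds as a fixed-point value with 20
 * fractional bits, which is the format GTI.TIV appears to use (assumption
 * based on the ">> 20" in ravb_ptp_update_compare()).
 */
static uint32_t example_gti_increment(uint64_t clock_hz)
{
	return (uint32_t)((1000000000ULL << 20) / clock_hz);
}

/* The arithmetic behind adjust_by_scaled_ppm(), rounding ignored:
 * scaled_ppm is ppm with a 16-bit binary fractional part, so the scale
 * factor is 10^6 * 2^16.
 */
static uint32_t example_adjusted_addend(uint32_t default_addend, long scaled_ppm)
{
	int64_t diff = (int64_t)default_addend * scaled_ppm /
		       (1000000LL * 65536LL);

	return default_addend + (int32_t)diff;
}

int main(void)
{
	uint32_t addend = example_gti_increment(133000000);	/* ~7.52 ns/cycle, illustrative */

	printf("GTI.TIV = 0x%08x\n", addend);
	printf("+10 ppm = 0x%08x\n", example_adjusted_addend(addend, 10 * 65536));
	printf("-10 ppm = 0x%08x\n", example_adjusted_addend(addend, -10 * 65536));
	return 0;
}

Note that ravb_ptp_init() does not compute the addend itself; it reads back whatever increment was already programmed into GTI and uses that as default_addend.
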
drivers/net/ethernet/renesas/ravb_ptp.c, v5.4
  1// SPDX-License-Identifier: GPL-2.0+
  2/* PTP 1588 clock using the Renesas Ethernet AVB
  3 *
  4 * Copyright (C) 2013-2015 Renesas Electronics Corporation
  5 * Copyright (C) 2015 Renesas Solutions Corp.
  6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  7 */
  8
  9#include "ravb.h"
 10
 11static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
 12{
 13	struct net_device *ndev = priv->ndev;
 14	int error;
 15
 16	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 17	if (error)
 18		return error;
 19
 20	ravb_modify(ndev, GCCR, request, request);
 21	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 22}
 23
 24/* Caller must hold the lock */
 25static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
 26{
 27	struct net_device *ndev = priv->ndev;
 28	int error;
 29
 30	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
 31	if (error)
 32		return error;
 33
 34	ts->tv_nsec = ravb_read(ndev, GCT0);
 35	ts->tv_sec  = ravb_read(ndev, GCT1) |
 36		((s64)ravb_read(ndev, GCT2) << 32);
 37
 38	return 0;
 39}
 40
 41/* Caller must hold the lock */
 42static int ravb_ptp_time_write(struct ravb_private *priv,
 43				const struct timespec64 *ts)
 44{
 45	struct net_device *ndev = priv->ndev;
 46	int error;
 47	u32 gccr;
 48
 49	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
 50	if (error)
 51		return error;
 52
 53	gccr = ravb_read(ndev, GCCR);
 54	if (gccr & GCCR_LTO)
 55		return -EBUSY;
 56	ravb_write(ndev, ts->tv_nsec, GTO0);
 57	ravb_write(ndev, ts->tv_sec,  GTO1);
 58	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
 59	ravb_write(ndev, gccr | GCCR_LTO, GCCR);
 60
 61	return 0;
 62}
 63
 64/* Caller must hold the lock */
 65static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
 66{
 67	struct net_device *ndev = priv->ndev;
 68	/* When the comparison value (GPTC.PTCV) is in range of
 69	 * [x-1 to x+1] (x is the configured increment value in
 70	 * GTI.TIV), it may happen that a comparison match is
 71	 * not detected when the timer wraps around.
 72	 */
 73	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
 74	u32 gccr;
 75
 76	if (ns < gti_ns_plus_1)
 77		ns = gti_ns_plus_1;
 78	else if (ns > 0 - gti_ns_plus_1)
 79		ns = 0 - gti_ns_plus_1;
 80
 81	gccr = ravb_read(ndev, GCCR);
 82	if (gccr & GCCR_LPTC)
 83		return -EBUSY;
 84	ravb_write(ndev, ns, GPTC);
 85	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
 86
 87	return 0;
 88}
 89
 90/* PTP clock operations */
 91static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 92{
 93	struct ravb_private *priv = container_of(ptp, struct ravb_private,
 94						 ptp.info);
 95	struct net_device *ndev = priv->ndev;
 96	unsigned long flags;
 97	u32 diff, addend;
 98	bool neg_adj = false;
 99	u32 gccr;
100
101	if (ppb < 0) {
102		neg_adj = true;
103		ppb = -ppb;
104	}
105	addend = priv->ptp.default_addend;
106	diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
107
108	addend = neg_adj ? addend - diff : addend + diff;
109
110	spin_lock_irqsave(&priv->lock, flags);
111
112	priv->ptp.current_addend = addend;
113
114	gccr = ravb_read(ndev, GCCR);
115	if (gccr & GCCR_LTI) {
116		spin_unlock_irqrestore(&priv->lock, flags);
117		return -EBUSY;
118	}
119	ravb_write(ndev, addend & GTI_TIV, GTI);
120	ravb_write(ndev, gccr | GCCR_LTI, GCCR);
121
122	spin_unlock_irqrestore(&priv->lock, flags);
123
124	return 0;
125}
126
127static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
128{
129	struct ravb_private *priv = container_of(ptp, struct ravb_private,
130						 ptp.info);
131	struct timespec64 ts;
132	unsigned long flags;
133	int error;
134
135	spin_lock_irqsave(&priv->lock, flags);
136	error = ravb_ptp_time_read(priv, &ts);
137	if (!error) {
138		u64 now = ktime_to_ns(timespec64_to_ktime(ts));
139
140		ts = ns_to_timespec64(now + delta);
141		error = ravb_ptp_time_write(priv, &ts);
142	}
143	spin_unlock_irqrestore(&priv->lock, flags);
144
145	return error;
146}
147
148static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
149{
150	struct ravb_private *priv = container_of(ptp, struct ravb_private,
151						 ptp.info);
152	unsigned long flags;
153	int error;
154
155	spin_lock_irqsave(&priv->lock, flags);
156	error = ravb_ptp_time_read(priv, ts);
157	spin_unlock_irqrestore(&priv->lock, flags);
158
159	return error;
160}
161
162static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
163			      const struct timespec64 *ts)
164{
165	struct ravb_private *priv = container_of(ptp, struct ravb_private,
166						 ptp.info);
167	unsigned long flags;
168	int error;
169
170	spin_lock_irqsave(&priv->lock, flags);
171	error = ravb_ptp_time_write(priv, ts);
172	spin_unlock_irqrestore(&priv->lock, flags);
173
174	return error;
175}
176
177static int ravb_ptp_extts(struct ptp_clock_info *ptp,
178			  struct ptp_extts_request *req, int on)
179{
180	struct ravb_private *priv = container_of(ptp, struct ravb_private,
 181						 ptp.info);
 182	struct net_device *ndev = priv->ndev;
183	unsigned long flags;
184
185	/* Reject requests with unsupported flags */
186	if (req->flags & ~(PTP_ENABLE_FEATURE |
187			   PTP_RISING_EDGE |
188			   PTP_FALLING_EDGE |
189			   PTP_STRICT_FLAGS))
190		return -EOPNOTSUPP;
191
192	if (req->index)
193		return -EINVAL;
194
195	if (priv->ptp.extts[req->index] == on)
196		return 0;
197	priv->ptp.extts[req->index] = on;
198
199	spin_lock_irqsave(&priv->lock, flags);
200	if (priv->chip_id == RCAR_GEN2)
201		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
202	else if (on)
203		ravb_write(ndev, GIE_PTCS, GIE);
204	else
205		ravb_write(ndev, GID_PTCD, GID);
206	spin_unlock_irqrestore(&priv->lock, flags);
207
208	return 0;
209}
210
211static int ravb_ptp_perout(struct ptp_clock_info *ptp,
212			   struct ptp_perout_request *req, int on)
213{
214	struct ravb_private *priv = container_of(ptp, struct ravb_private,
 215						 ptp.info);
 216	struct net_device *ndev = priv->ndev;
217	struct ravb_ptp_perout *perout;
218	unsigned long flags;
219	int error = 0;
220
221	/* Reject requests with unsupported flags */
222	if (req->flags)
223		return -EOPNOTSUPP;
224
225	if (req->index)
226		return -EINVAL;
227
228	if (on) {
229		u64 start_ns;
230		u64 period_ns;
231
232		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
233		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
234
235		if (start_ns > U32_MAX) {
236			netdev_warn(ndev,
237				    "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
238			return -ERANGE;
239		}
240
241		if (period_ns > U32_MAX) {
242			netdev_warn(ndev,
243				    "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
244			return -ERANGE;
245		}
246
247		spin_lock_irqsave(&priv->lock, flags);
248
249		perout = &priv->ptp.perout[req->index];
250		perout->target = (u32)start_ns;
251		perout->period = (u32)period_ns;
252		error = ravb_ptp_update_compare(priv, (u32)start_ns);
253		if (!error) {
254			/* Unmask interrupt */
255			if (priv->chip_id == RCAR_GEN2)
256				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
257			else
258				ravb_write(ndev, GIE_PTMS0, GIE);
259		}
260	} else	{
261		spin_lock_irqsave(&priv->lock, flags);
262
263		perout = &priv->ptp.perout[req->index];
264		perout->period = 0;
265
266		/* Mask interrupt */
267		if (priv->chip_id == RCAR_GEN2)
268			ravb_modify(ndev, GIC, GIC_PTME, 0);
269		else
270			ravb_write(ndev, GID_PTMD0, GID);
271	}
272	spin_unlock_irqrestore(&priv->lock, flags);
273
274	return error;
275}
276
277static int ravb_ptp_enable(struct ptp_clock_info *ptp,
278			   struct ptp_clock_request *req, int on)
279{
280	switch (req->type) {
281	case PTP_CLK_REQ_EXTTS:
282		return ravb_ptp_extts(ptp, &req->extts, on);
283	case PTP_CLK_REQ_PEROUT:
284		return ravb_ptp_perout(ptp, &req->perout, on);
285	default:
286		return -EOPNOTSUPP;
287	}
288}
289
290static const struct ptp_clock_info ravb_ptp_info = {
291	.owner		= THIS_MODULE,
292	.name		= "ravb clock",
293	.max_adj	= 50000000,
294	.n_ext_ts	= N_EXT_TS,
295	.n_per_out	= N_PER_OUT,
296	.adjfreq	= ravb_ptp_adjfreq,
297	.adjtime	= ravb_ptp_adjtime,
298	.gettime64	= ravb_ptp_gettime64,
299	.settime64	= ravb_ptp_settime64,
300	.enable		= ravb_ptp_enable,
301};
302
303/* Caller must hold the lock */
304void ravb_ptp_interrupt(struct net_device *ndev)
305{
306	struct ravb_private *priv = netdev_priv(ndev);
307	u32 gis = ravb_read(ndev, GIS);
308
309	gis &= ravb_read(ndev, GIC);
310	if (gis & GIS_PTCF) {
311		struct ptp_clock_event event;
312
313		event.type = PTP_CLOCK_EXTTS;
314		event.index = 0;
315		event.timestamp = ravb_read(ndev, GCPT);
316		ptp_clock_event(priv->ptp.clock, &event);
317	}
318	if (gis & GIS_PTMF) {
319		struct ravb_ptp_perout *perout = priv->ptp.perout;
320
321		if (perout->period) {
322			perout->target += perout->period;
323			ravb_ptp_update_compare(priv, perout->target);
324		}
325	}
326
327	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
328}
329
330void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
331{
332	struct ravb_private *priv = netdev_priv(ndev);
333	unsigned long flags;
334
335	priv->ptp.info = ravb_ptp_info;
336
337	priv->ptp.default_addend = ravb_read(ndev, GTI);
338	priv->ptp.current_addend = priv->ptp.default_addend;
339
340	spin_lock_irqsave(&priv->lock, flags);
341	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
342	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
343	spin_unlock_irqrestore(&priv->lock, flags);
344
345	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
346}
347
348void ravb_ptp_stop(struct net_device *ndev)
349{
350	struct ravb_private *priv = netdev_priv(ndev);
351
352	ravb_write(ndev, 0, GIC);
353	ravb_write(ndev, 0, GIS);
354
355	ptp_clock_unregister(priv->ptp.clock);
356}
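
For completeness, the operations in ravb_ptp_info are reached from userspace through the PTP hardware clock character device created by ptp_clock_register(). A minimal sketch, assuming the ravb clock shows up as /dev/ptp0 (the device index is an assumption, and error handling is kept to the bare minimum):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

/* Standard trick for dynamic POSIX clocks: turn an open /dev/ptpN fd into
 * a clockid usable with clock_gettime()/clock_adjtime().
 */
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct ptp_extts_request extts;
	struct timespec ts;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);		/* assumption: ravb registered as ptp0 */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Serviced by ravb_ptp_gettime64() via the POSIX clock layer. */
	if (!clock_gettime(FD_TO_CLOCKID(fd), &ts))
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	/* Serviced by ravb_ptp_enable() -> ravb_ptp_extts(); only index 0
	 * is accepted by this driver.
	 */
	memset(&extts, 0, sizeof(extts));
	extts.index = 0;
	extts.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &extts))
		perror("PTP_EXTTS_REQUEST");

	close(fd);
	return 0;
}

PTP_PEROUT_REQUEST and clock_adjtime() take the same route into ravb_ptp_perout(), ravb_ptp_adjtime() and ravb_ptp_adjfine() (adjfreq on v5.4).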