// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

struct class *ptp_class;

/* private globals */

static dev_t ptp_devt;

static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
        return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

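/*
 * The queue is a simple ring buffer protected by queue->lock.  One slot is
 * always left unused (see queue_free() above), so head == tail means
 * "empty"; when the queue is full, the oldest event is overwritten so that
 * the newest timestamps are never lost.
 */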
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
                                       struct ptp_clock_event *src)
{
        struct ptp_extts_event *dst;
        unsigned long flags;
        s64 seconds;
        u32 remainder;

        seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

        spin_lock_irqsave(&queue->lock, flags);

        dst = &queue->buf[queue->tail];
        dst->index = src->index;
        dst->t.sec = seconds;
        dst->t.nsec = remainder;

        if (!queue_free(queue))
                queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;

        queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;

        spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = 1;
        return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

        if (ptp_clock_freerun(ptp)) {
                pr_err("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        int err;

        if (ptp->info->gettimex64)
                err = ptp->info->gettimex64(ptp->info, tp, NULL);
        else
                err = ptp->info->gettime64(ptp->info, tp);
        return err;
}

static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops;
        int err = -EOPNOTSUPP;

        if (ptp_clock_freerun(ptp)) {
                pr_err("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        ops = ptp->info;

        if (tx->modes & ADJ_SETOFFSET) {
                struct timespec64 ts;
                ktime_t kt;
                s64 delta;

                ts.tv_sec = tx->time.tv_sec;
                ts.tv_nsec = tx->time.tv_usec;

                if (!(tx->modes & ADJ_NANO))
                        ts.tv_nsec *= 1000;

                if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
                        return -EINVAL;

                kt = timespec64_to_ktime(ts);
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
                long ppb = scaled_ppm_to_ppb(tx->freq);
                if (ppb > ops->max_adj || ppb < -ops->max_adj)
                        return -ERANGE;
                err = ops->adjfine(ops, tx->freq);
                ptp->dialed_frequency = tx->freq;
        } else if (tx->modes & ADJ_OFFSET) {
                if (ops->adjphase) {
                        s32 offset = tx->offset;

                        if (!(tx->modes & ADJ_NANO))
                                offset *= NSEC_PER_USEC;

                        err = ops->adjphase(ops, offset);
                }
        } else if (tx->modes == 0) {
                tx->freq = ptp->dialed_frequency;
                err = 0;
        }

        return err;
}

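/*
 * Illustrative user space call path (example only, not part of this file):
 * a frequency adjustment of +100 ppm, expressed in the 16-bit fractional
 * "scaled ppm" unit of the timex interface, could be issued roughly as:
 *
 *	struct timex tx = {
 *		.modes = ADJ_FREQUENCY,
 *		.freq  = 100 * 65536,	// scaled ppm: ppm * 2^16
 *	};
 *	clock_adjtime(clkid, &tx);	// clkid derived from the /dev/ptpN fd
 *
 * The request lands in ptp_clock_adjtime() above, is range checked against
 * the driver's max_adj, and is then forwarded to the adjfine() callback.
 */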
static struct posix_clock_operations ptp_clock_ops = {
        .owner = THIS_MODULE,
        .clock_adjtime = ptp_clock_adjtime,
        .clock_gettime = ptp_clock_gettime,
        .clock_getres = ptp_clock_getres,
        .clock_settime = ptp_clock_settime,
        .ioctl = ptp_ioctl,
        .open = ptp_open,
        .poll = ptp_poll,
        .read = ptp_read,
};

static void ptp_clock_release(struct device *dev)
{
        struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

        ptp_cleanup_pin_groups(ptp);
        kfree(ptp->vclock_index);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        ida_free(&ptp_clocks_map, ptp->index);
        kfree(ptp);
}

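/*
 * Default getcycles64() implementation: if the driver supplies the extended
 * getcyclesx64() variant, use it; otherwise fall back to the ordinary clock
 * time, so that cycle readers still get a usable value from hardware
 * without a free-running cycle counter.
 */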
static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
        if (info->getcyclesx64)
                return info->getcyclesx64(info, ts, NULL);
        else
                return info->gettime64(info, ts);
}

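/*
 * Worker wrapper around the driver's do_aux_work() callback.  A non-negative
 * return value is treated as a delay in jiffies after which the work is
 * queued again; a negative value stops the rescheduling until
 * ptp_schedule_worker() is called.
 */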
static void ptp_aux_kworker(struct kthread_work *work)
{
        struct ptp_clock *ptp = container_of(work, struct ptp_clock,
                                             aux_work.work);
        struct ptp_clock_info *info = ptp->info;
        long delay;

        delay = info->do_aux_work(info);

        if (delay >= 0)
                kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                     struct device *parent)
{
        struct ptp_clock *ptp;
        int err = 0, index, major = MAJOR(ptp_devt);
        size_t size;

        if (info->n_alarm > PTP_MAX_ALARMS)
                return ERR_PTR(-EINVAL);

        /* Initialize a clock structure. */
        err = -ENOMEM;
        ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
        if (ptp == NULL)
                goto no_memory;

        index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
        if (index < 0) {
                err = index;
                goto no_slot;
        }

        ptp->clock.ops = ptp_clock_ops;
        ptp->info = info;
        ptp->devid = MKDEV(major, index);
        ptp->index = index;
        spin_lock_init(&ptp->tsevq.lock);
        mutex_init(&ptp->tsevq_mux);
        mutex_init(&ptp->pincfg_mux);
        mutex_init(&ptp->n_vclocks_mux);
        init_waitqueue_head(&ptp->tsev_wq);

        if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
                ptp->has_cycles = true;
                if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
                        ptp->info->getcycles64 = ptp_getcycles64;
        } else {
                /* Free running cycle counter not supported, use time. */
                ptp->info->getcycles64 = ptp_getcycles64;

                if (ptp->info->gettimex64)
                        ptp->info->getcyclesx64 = ptp->info->gettimex64;

                if (ptp->info->getcrosststamp)
                        ptp->info->getcrosscycles = ptp->info->getcrosststamp;
        }

        if (ptp->info->do_aux_work) {
                kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
                ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
                if (IS_ERR(ptp->kworker)) {
                        err = PTR_ERR(ptp->kworker);
                        pr_err("failed to create ptp aux_worker %d\n", err);
                        goto kworker_err;
                }
        }

        /* PTP virtual clock is being registered under physical clock */
        if (parent && parent->class && parent->class->name &&
            strcmp(parent->class->name, "ptp") == 0)
                ptp->is_virtual_clock = true;

        if (!ptp->is_virtual_clock) {
                ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

                size = sizeof(int) * ptp->max_vclocks;
                ptp->vclock_index = kzalloc(size, GFP_KERNEL);
                if (!ptp->vclock_index) {
                        err = -ENOMEM;
                        goto no_mem_for_vclocks;
                }
        }

        err = ptp_populate_pin_groups(ptp);
        if (err)
                goto no_pin_groups;

        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;
                memset(&pps, 0, sizeof(pps));
                snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
                pps.mode = PTP_PPS_MODE;
                pps.owner = info->owner;
                ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
                if (IS_ERR(ptp->pps_source)) {
                        err = PTR_ERR(ptp->pps_source);
                        pr_err("failed to register pps source\n");
                        goto no_pps;
                }
                ptp->pps_source->lookup_cookie = ptp;
        }

        /* Initialize a new device of our class in our clock structure. */
        device_initialize(&ptp->dev);
        ptp->dev.devt = ptp->devid;
        ptp->dev.class = ptp_class;
        ptp->dev.parent = parent;
        ptp->dev.groups = ptp->pin_attr_groups;
        ptp->dev.release = ptp_clock_release;
        dev_set_drvdata(&ptp->dev, ptp);
        dev_set_name(&ptp->dev, "ptp%d", ptp->index);

        /* Create a posix clock and link it to the device. */
        err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
                if (ptp->pps_source)
                        pps_unregister_source(ptp->pps_source);

                if (ptp->kworker)
                        kthread_destroy_worker(ptp->kworker);

                put_device(&ptp->dev);

                pr_err("failed to create posix clock\n");
                return ERR_PTR(err);
        }

        return ptp;

no_pps:
        ptp_cleanup_pin_groups(ptp);
no_pin_groups:
        kfree(ptp->vclock_index);
no_mem_for_vclocks:
        if (ptp->kworker)
                kthread_destroy_worker(ptp->kworker);
kworker_err:
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        ida_free(&ptp_clocks_map, index);
no_slot:
        kfree(ptp);
no_memory:
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

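/*
 * Illustrative driver-side usage (sketch only; the "foo" names and the
 * pdev parent device are hypothetical, not part of this file):
 *
 *	static struct ptp_clock_info foo_ptp_info = {
 *		.owner      = THIS_MODULE,
 *		.name       = "foo",
 *		.max_adj    = 500000,
 *		.adjfine    = foo_adjfine,
 *		.adjtime    = foo_adjtime,
 *		.gettimex64 = foo_gettimex64,
 *		.settime64  = foo_settime64,
 *		.enable     = foo_enable,
 *	};
 *
 *	foo->ptp = ptp_clock_register(&foo_ptp_info, &pdev->dev);
 *	if (IS_ERR(foo->ptp))
 *		return PTR_ERR(foo->ptp);
 *
 * The returned handle is later passed to ptp_clock_event() and, on
 * teardown, to ptp_clock_unregister().
 */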
static int unregister_vclock(struct device *dev, void *data)
{
        struct ptp_clock *ptp = dev_get_drvdata(dev);

        ptp_vclock_unregister(info_to_vclock(ptp->info));
        return 0;
}

int ptp_clock_unregister(struct ptp_clock *ptp)
{
        if (ptp_vclock_in_use(ptp)) {
                device_for_each_child(&ptp->dev, NULL, unregister_vclock);
        }

        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);

        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
        }

        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);

        posix_clock_unregister(&ptp->clock);

        return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

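/*
 * Illustrative event delivery from a driver interrupt handler (sketch only;
 * "foo", "chan" and "ns" are hypothetical):
 *
 *	struct ptp_clock_event ev = {
 *		.type = PTP_CLOCK_EXTTS,
 *		.index = chan,
 *		.timestamp = ns,	// event time in nanoseconds
 *	};
 *	ptp_clock_event(foo->ptp, &ev);
 *
 * EXTTS events are queued for readers of /dev/ptpN; PPS events are
 * forwarded to the kernel PPS subsystem.
 */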
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
        struct pps_event_time evt;

        switch (event->type) {

        case PTP_CLOCK_ALARM:
                break;

        case PTP_CLOCK_EXTTS:
                enqueue_external_timestamp(&ptp->tsevq, event);
                wake_up_interruptible(&ptp->tsev_wq);
                break;

        case PTP_CLOCK_PPS:
                pps_get_ts(&evt);
                pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
                break;

        case PTP_CLOCK_PPSUSR:
                pps_event(ptp->pps_source, &event->pps_times,
                          PTP_PPS_EVENT, NULL);
                break;
        }
}
EXPORT_SYMBOL(ptp_clock_event);

int ptp_clock_index(struct ptp_clock *ptp)
{
        return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

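/*
 * Look up the pin index currently programmed for a given function and
 * channel.  The caller must hold pincfg_mux; callers that do not hold the
 * mutex should use ptp_find_pin_unlocked() below, which takes it
 * internally.
 */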
int ptp_find_pin(struct ptp_clock *ptp,
                 enum ptp_pin_function func, unsigned int chan)
{
        struct ptp_pin_desc *pin = NULL;
        int i;

        for (i = 0; i < ptp->info->n_pins; i++) {
                if (ptp->info->pin_config[i].func == func &&
                    ptp->info->pin_config[i].chan == chan) {
                        pin = &ptp->info->pin_config[i];
                        break;
                }
        }

        return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

int ptp_find_pin_unlocked(struct ptp_clock *ptp,
                          enum ptp_pin_function func, unsigned int chan)
{
        int result;

        mutex_lock(&ptp->pincfg_mux);

        result = ptp_find_pin(ptp, func, chan);

        mutex_unlock(&ptp->pincfg_mux);

        return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

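/*
 * (Re)arm the auxiliary worker after @delay jiffies.  Only valid for clocks
 * whose ptp_clock_info provides do_aux_work(), since the kworker is created
 * only in that case.
 */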
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
        return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
        kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
        class_destroy(ptp_class);
        unregister_chrdev_region(ptp_devt, MINORMASK + 1);
        ida_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
        int err;

        ptp_class = class_create(THIS_MODULE, "ptp");
        if (IS_ERR(ptp_class)) {
                pr_err("ptp: failed to allocate class\n");
                return PTR_ERR(ptp_class);
        }

        err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
        if (err < 0) {
                pr_err("ptp: failed to allocate device region\n");
                goto no_region;
        }

        ptp_class->dev_groups = ptp_groups;
        pr_info("PTP clock support registered\n");
        return 0;

no_region:
        class_destroy(ptp_class);
        return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");