Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * PTP 1588 clock support
  4 *
  5 * Copyright (C) 2010 OMICRON electronics GmbH
  6 */
  7#include <linux/device.h>
  8#include <linux/err.h>
  9#include <linux/init.h>
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/posix-clock.h>
 13#include <linux/pps_kernel.h>
 14#include <linux/slab.h>
 15#include <linux/syscalls.h>
 16#include <linux/uaccess.h>
 17#include <linux/debugfs.h>
 18#include <linux/xarray.h>
 19#include <uapi/linux/sched/types.h>
 20
 21#include "ptp_private.h"
 22
 23#define PTP_MAX_ALARMS 4
 24#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
 25#define PTP_PPS_EVENT PPS_CAPTUREASSERT
 26#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
 27
/* sysfs class backing all PTP clock character devices (/sys/class/ptp). */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
 32
 33/* private globals */
 34
 35static dev_t ptp_devt;
 36
 37static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
 38
 39/* time stamp event queue operations */
 40
 41static inline int queue_free(struct timestamp_event_queue *q)
 42{
 43	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
 44}
 45
/*
 * Copy one external timestamp (PTP_CLOCK_EXTTS) or offset
 * (PTP_CLOCK_EXTOFF) event into a reader's ring buffer.  Uses irqsave
 * locking, so it is safe when the producer runs in interrupt context.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	if (src->type == PTP_CLOCK_EXTTS) {
		/* Raw nanosecond timestamp: split into sec + nsec. */
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		/* Offset events carry a signed nanosecond offset instead. */
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		/* Ring full: drop the oldest event by advancing head. */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}
 84
 85/* posix clock implementation */
 86
 87static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
 88{
 89	tp->tv_sec = 0;
 90	tp->tv_nsec = 1;
 91	return 0;
 92}
 93
 94static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
 95{
 96	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 97
 98	if (ptp_clock_freerun(ptp)) {
 99		pr_err("ptp: physical clock is free running\n");
100		return -EBUSY;
101	}
102
103	return  ptp->info->settime64(ptp->info, tp);
104}
105
106static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
107{
108	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
109	int err;
110
111	if (ptp->info->gettimex64)
112		err = ptp->info->gettimex64(ptp->info, tp, NULL);
113	else
114		err = ptp->info->gettime64(ptp->info, tp);
115	return err;
116}
117
/*
 * clock_adjtime(2) for a PTP clock.  Supported modes: ADJ_SETOFFSET
 * (step the clock), ADJ_FREQUENCY (rate tuning), ADJ_OFFSET (phase
 * adjustment, if the driver supports it), and modes == 0 (query the
 * currently dialed frequency).
 */
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec  = tx->time.tv_sec;
		/* time.tv_usec actually carries nanoseconds when ADJ_NANO is set. */
		ts.tv_nsec = tx->time.tv_usec;

		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		/* Unsigned compare also rejects negative tv_nsec. */
		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		/* Only remember the setting if the driver accepted it. */
		if (!err)
			ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 max_phase_adj = ops->getmaxphase(ops);
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			if (offset > max_phase_adj || offset < -max_phase_adj)
				return -ERANGE;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		/* Pure query: report the last successfully dialed frequency. */
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}
175
/* Operations exported to user space through the posix clock layer. */
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.release	= ptp_release,
	.poll		= ptp_poll,
	.read		= ptp_read,
};
188
/*
 * Final release for the embedded struct device; runs when the last
 * reference to ptp->dev is dropped.  Frees everything the clock owns,
 * including the default timestamp queue allocated at register time.
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	/* Delete first entry */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(tsevq->mask);
	kfree(tsevq);
	debugfs_remove(ptp->debugfs_root);
	/* Release the clock index so it can be reused. */
	xa_erase(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
211
212static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
213{
214	if (info->getcyclesx64)
215		return info->getcyclesx64(info, ts, NULL);
216	else
217		return info->gettime64(info, ts);
218}
219
/* Default .enable stub for drivers that do not implement it. */
static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}
224
/* Run the driver's auxiliary work and re-arm it if the driver asks. */
static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	/* A negative return means "do not reschedule". */
	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
237
238/* public interface */
239
/**
 * ptp_clock_register() - register a PTP hardware clock driver
 * @info:   driver description and operations; must outlive the clock
 * @parent: owning device; if it is itself a "ptp" class device, the new
 *          clock is treated as a virtual clock
 *
 * Return: new clock handle, or an ERR_PTR() on failure.
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto no_memory;
	}

	/* Reserve a unique clock index (the N in "ptpN"). */
	err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
		       GFP_KERNEL);
	if (err)
		goto no_slot;

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	INIT_LIST_HEAD(&ptp->tsevqs);
	/* Allocate the default timestamp event queue. */
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		err = -ENOMEM;
		goto no_memory_queue;
	}
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		err = -ENOMEM;
		goto no_memory_bitmap;
	}
	/* By default the queue receives events from every channel. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (!ptp->info->enable)
		ptp->info->enable = ptp_enable;

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		/* After device_initialize(), cleanup goes through release. */
		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

	/* Error unwind: reverse order of the setup steps above. */
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	xa_erase(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
404
/* device_for_each_child() callback: tear down one virtual clock. */
static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}
412
/**
 * ptp_clock_unregister() - unregister a PTP hardware clock driver
 * @ptp: clock handle obtained from ptp_clock_register()
 *
 * Always returns 0.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	/* Remove any virtual clocks stacked on this physical clock first. */
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	/* Wake readers blocked in poll/read so they observe 'defunct'. */
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
436
/**
 * ptp_clock_event() - notify the PTP layer about a driver event
 * @ptp:   clock handle
 * @event: the event's type and payload
 *
 * Uses irqsave locking, so callable from interrupt context.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		/* Alarm events are currently not propagated anywhere. */
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			/* Deliver only to readers subscribed to this channel. */
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp the PPS edge using the system clock. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own event timestamps. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);
472
/* Return the clock's index, i.e. the N in the "ptp%d" device name. */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
478
479int ptp_find_pin(struct ptp_clock *ptp,
480		 enum ptp_pin_function func, unsigned int chan)
481{
482	struct ptp_pin_desc *pin = NULL;
483	int i;
484
485	for (i = 0; i < ptp->info->n_pins; i++) {
486		if (ptp->info->pin_config[i].func == func &&
487		    ptp->info->pin_config[i].chan == chan) {
488			pin = &ptp->info->pin_config[i];
489			break;
490		}
491	}
492
493	return pin ? i : -1;
494}
495EXPORT_SYMBOL(ptp_find_pin);
496
/*
 * Like ptp_find_pin(), but takes pincfg_mux itself.  The "unlocked"
 * in the name describes the caller's state (caller does NOT hold the
 * pin configuration lock), not this function's behavior.
 */
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);
511
/*
 * Schedule (or reschedule) the clock's auxiliary work to run after
 * @delay jiffies.  Only valid for drivers that supplied do_aux_work.
 */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
517
/* Cancel pending auxiliary work and wait for a running instance to end. */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
523
524/* module operations */
525
/* Module teardown: reverse of ptp_init(). */
static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	xa_destroy(&ptp_clocks_map);
}
532
/* Module init: register the class and reserve a chardev major region. */
static int __init ptp_init(void)
{
	int err;

	err = class_register(&ptp_class);
	if (err) {
		pr_err("ptp: failed to allocate class\n");
		return err;
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_unregister(&ptp_class);
	return err;
}
556
557subsys_initcall(ptp_init);
558module_exit(ptp_exit);
559
560MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
561MODULE_DESCRIPTION("PTP clocks support");
562MODULE_LICENSE("GPL");
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * PTP 1588 clock support
  4 *
  5 * Copyright (C) 2010 OMICRON electronics GmbH
  6 */
  7#include <linux/device.h>
  8#include <linux/err.h>
  9#include <linux/init.h>
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/posix-clock.h>
 13#include <linux/pps_kernel.h>
 14#include <linux/slab.h>
 15#include <linux/syscalls.h>
 16#include <linux/uaccess.h>
 17#include <linux/debugfs.h>
 18#include <linux/xarray.h>
 19#include <uapi/linux/sched/types.h>
 20
 21#include "ptp_private.h"
 22
 23#define PTP_MAX_ALARMS 4
 24#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
 25#define PTP_PPS_EVENT PPS_CAPTUREASSERT
 26#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
 27
/* sysfs class backing all PTP clock character devices (/sys/class/ptp). */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
 32
 33/* private globals */
 34
 35static dev_t ptp_devt;
 36
 37static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
 38
 39/* time stamp event queue operations */
 40
 41static inline int queue_free(struct timestamp_event_queue *q)
 42{
 43	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
 44}
 45
/*
 * Copy one external timestamp (PTP_CLOCK_EXTTS) or offset
 * (PTP_CLOCK_EXTOFF) event into a reader's ring buffer.  Uses irqsave
 * locking, so it is safe when the producer runs in interrupt context.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	if (src->type == PTP_CLOCK_EXTTS) {
		/* Raw nanosecond timestamp: split into sec + nsec. */
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		/* Offset events carry a signed nanosecond offset instead. */
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		/* Ring full: drop the oldest event by advancing head. */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}
 84
 85/* posix clock implementation */
 86
 87static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
 88{
 89	tp->tv_sec = 0;
 90	tp->tv_nsec = 1;
 91	return 0;
 92}
 93
 94static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
 95{
 96	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 97
 98	if (ptp_clock_freerun(ptp)) {
 99		pr_err("ptp: physical clock is free running\n");
100		return -EBUSY;
101	}
102
103	return  ptp->info->settime64(ptp->info, tp);
104}
105
106static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
107{
108	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
109	int err;
110
111	if (ptp->info->gettimex64)
112		err = ptp->info->gettimex64(ptp->info, tp, NULL);
113	else
114		err = ptp->info->gettime64(ptp->info, tp);
115	return err;
116}
117
118static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
119{
120	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
121	struct ptp_clock_info *ops;
122	int err = -EOPNOTSUPP;
123
124	if (ptp_clock_freerun(ptp)) {
125		pr_err("ptp: physical clock is free running\n");
126		return -EBUSY;
127	}
128
129	ops = ptp->info;
130
131	if (tx->modes & ADJ_SETOFFSET) {
132		struct timespec64 ts;
133		ktime_t kt;
134		s64 delta;
135
136		ts.tv_sec  = tx->time.tv_sec;
137		ts.tv_nsec = tx->time.tv_usec;
138
139		if (!(tx->modes & ADJ_NANO))
140			ts.tv_nsec *= 1000;
141
142		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
143			return -EINVAL;
144
145		kt = timespec64_to_ktime(ts);
146		delta = ktime_to_ns(kt);
147		err = ops->adjtime(ops, delta);
148	} else if (tx->modes & ADJ_FREQUENCY) {
149		long ppb = scaled_ppm_to_ppb(tx->freq);
150		if (ppb > ops->max_adj || ppb < -ops->max_adj)
151			return -ERANGE;
152		err = ops->adjfine(ops, tx->freq);
153		ptp->dialed_frequency = tx->freq;
 
154	} else if (tx->modes & ADJ_OFFSET) {
155		if (ops->adjphase) {
156			s32 max_phase_adj = ops->getmaxphase(ops);
157			s32 offset = tx->offset;
158
159			if (!(tx->modes & ADJ_NANO))
160				offset *= NSEC_PER_USEC;
161
162			if (offset > max_phase_adj || offset < -max_phase_adj)
163				return -ERANGE;
164
165			err = ops->adjphase(ops, offset);
166		}
167	} else if (tx->modes == 0) {
168		tx->freq = ptp->dialed_frequency;
169		err = 0;
170	}
171
172	return err;
173}
174
/* Operations exported to user space through the posix clock layer. */
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.release	= ptp_release,
	.poll		= ptp_poll,
	.read		= ptp_read,
};
187
/*
 * Final release for the embedded struct device; runs when the last
 * reference to ptp->dev is dropped.  Frees everything the clock owns,
 * including the default timestamp queue allocated at register time.
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	/* Delete first entry */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(tsevq->mask);
	kfree(tsevq);
	debugfs_remove(ptp->debugfs_root);
	/* Release the clock index so it can be reused. */
	xa_erase(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
210
211static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
212{
213	if (info->getcyclesx64)
214		return info->getcyclesx64(info, ts, NULL);
215	else
216		return info->gettime64(info, ts);
217}
218
 
 
 
 
 
/* Run the driver's auxiliary work and re-arm it if the driver asks. */
static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	/* A negative return means "do not reschedule". */
	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
231
232/* public interface */
233
/**
 * ptp_clock_register() - register a PTP hardware clock driver
 * @info:   driver description and operations; must outlive the clock
 * @parent: owning device; if it is itself a "ptp" class device, the new
 *          clock is treated as a virtual clock
 *
 * Return: new clock handle, or an ERR_PTR() on failure.
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto no_memory;
	}

	/* Reserve a unique clock index (the N in "ptpN"). */
	err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
		       GFP_KERNEL);
	if (err)
		goto no_slot;

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	INIT_LIST_HEAD(&ptp->tsevqs);
	/* Allocate the default timestamp event queue. */
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		err = -ENOMEM;
		goto no_memory_queue;
	}
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		err = -ENOMEM;
		goto no_memory_bitmap;
	}
	/* By default the queue receives events from every channel. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		/* After device_initialize(), cleanup goes through release. */
		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

	/* Error unwind: reverse order of the setup steps above. */
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	xa_erase(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
395
/* device_for_each_child() callback: tear down one virtual clock. */
static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}
403
/**
 * ptp_clock_unregister() - unregister a PTP hardware clock driver
 * @ptp: clock handle obtained from ptp_clock_register()
 *
 * Always returns 0.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	/* Remove any virtual clocks stacked on this physical clock first. */
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	/* Wake readers blocked in poll/read so they observe 'defunct'. */
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
427
/**
 * ptp_clock_event() - notify the PTP layer about a driver event
 * @ptp:   clock handle
 * @event: the event's type and payload
 *
 * Uses irqsave locking, so callable from interrupt context.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		/* Alarm events are currently not propagated anywhere. */
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			/* Deliver only to readers subscribed to this channel. */
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp the PPS edge using the system clock. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own event timestamps. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);
463
/* Return the clock's index, i.e. the N in the "ptp%d" device name. */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
469
470int ptp_find_pin(struct ptp_clock *ptp,
471		 enum ptp_pin_function func, unsigned int chan)
472{
473	struct ptp_pin_desc *pin = NULL;
474	int i;
475
476	for (i = 0; i < ptp->info->n_pins; i++) {
477		if (ptp->info->pin_config[i].func == func &&
478		    ptp->info->pin_config[i].chan == chan) {
479			pin = &ptp->info->pin_config[i];
480			break;
481		}
482	}
483
484	return pin ? i : -1;
485}
486EXPORT_SYMBOL(ptp_find_pin);
487
/*
 * Like ptp_find_pin(), but takes pincfg_mux itself.  The "unlocked"
 * in the name describes the caller's state (caller does NOT hold the
 * pin configuration lock), not this function's behavior.
 */
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);
502
/*
 * Schedule (or reschedule) the clock's auxiliary work to run after
 * @delay jiffies.  Only valid for drivers that supplied do_aux_work.
 */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
508
/* Cancel pending auxiliary work and wait for a running instance to end. */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
514
515/* module operations */
516
/* Module teardown: reverse of ptp_init(). */
static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	xa_destroy(&ptp_clocks_map);
}
523
/* Module init: register the class and reserve a chardev major region. */
static int __init ptp_init(void)
{
	int err;

	err = class_register(&ptp_class);
	if (err) {
		pr_err("ptp: failed to allocate class\n");
		return err;
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_unregister(&ptp_class);
	return err;
}
547
548subsys_initcall(ptp_init);
549module_exit(ptp_exit);
550
551MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
552MODULE_DESCRIPTION("PTP clocks support");
553MODULE_LICENSE("GPL");