// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>

#include "icss_iep.h"

#define IEP_MAX_DEF_INC		0xf
#define IEP_MAX_COMPEN_INC		0xfff
#define IEP_MAX_COMPEN_COUNT	0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)

#define IEP_CMP_CFG_SHADOW_EN		BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
#define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))

#define IEP_CMP_STATUS(cmp)		(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
#define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))

#define IEP_MIN_CMP	0
#define IEP_MAX_CMP	15

#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
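/*
 * Note: external timestamp (extts) index n is routed to hardware latch
 * input CAP6 + n, i.e. indices 0 and 1 correspond to the CAP6/CAP7
 * capture inputs in the AM654 register layout at the bottom of this file.
 */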

/**
 * icss_iep_get_count_hi() - Get the upper 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: upper 32 bits of the IEP counter
 */
int icss_iep_get_count_hi(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);

/**
 * icss_iep_get_count_low() - Get the lower 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: lower 32 bits of the IEP counter
 */
int icss_iep_get_count_low(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep)
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_low);

/**
 * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
 * @iep: Pointer to structure representing IEP.
 *
 * Return: PTP clock index, -1 if not registered
 */
int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
{
	if (!iep || !iep->ptp_clock)
		return -1;
	return ptp_clock_index(iep->ptp_clock);
}
EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);

static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base +
		       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);

/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time-sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	if (iep->pps_enabled || iep->perout_enabled) {
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
}
/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time-sensitive.
 *
 * Return: The current timestamp of the PTP clock in nanoseconds
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	return (u64)ts_lo | (u64)ts_hi << 32;
}

static void icss_iep_enable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   IEP_GLOBAL_CFG_CNT_ENABLE);
}

static void icss_iep_disable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   0);
}

static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

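	/* The CMP0-triggered reset itself appears to account for one counter
	 * tick, hence the compare value below is programmed one default
	 * increment short of the nominal cycle time (editor's reading of the
	 * subtraction; the hardware reference manual is authoritative).
	 */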
	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}

static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
			   def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
}

static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(dev, "%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}

static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		dev_err(dev, "%s: too high compensation count %u\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}

static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}

/* PTP PHC operations */
static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u32 cyc_count;
	u16 cmp_inc;

	mutex_lock(&iep->ptp_clk_mutex);

	/* ppb is the amount of frequency adjustment in parts per billion,
	 * e.g. 100 ppb means the clock must be sped up by 100 Hz, i.e. after
	 * 1 second (1 billion ns) of clock time it should count 100 more ns.
	 * IEP slow compensation is used to achieve continuous frequency
	 * adjustment. It has two parts: the cycle time and the adjustment
	 * applied per cycle. The simplest case is a 1 s cycle time, where
	 * the adjustment per cycle is (def_inc + ppb).
	 * The cycle time has to be chosen based on how large the ppb is:
	 * the smaller the ppb, the larger the cycle time. The minimum
	 * adjustment per cycle is +-1 ns, so the cycle time is reduced until
	 * a 1 ns per cycle adjustment is reached:
	 *	1 ppb = 1 s cycle time & 1 ns adjust
	 *	1000 ppb = 1/1000 s cycle time & 1 ns adjust per cycle
	 */
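	/* Worked example (illustrative): with def_inc = 4 ns (250 MHz IEP
	 * clock) and ppb = 100, slow_cmp_inc = 1 and cyc_count =
	 * 1e9 / 100 / 4 = 2500000 clock cycles. Once every 2500000 cycles
	 * (10 ms) one tick counts 5 ns instead of 4 ns, i.e. +1 ns per
	 * 10 ms, which is 100 ns per second = 100 ppb.
	 */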

	if (iep->cycle_time_ns)
		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
	else
		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */

	if (ppb < 0) {
		iep->slow_cmp_inc = -iep->slow_cmp_inc;
		ppb = -ppb;
	}

	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
	cyc_count /= ppb;		/* cycle time per ppb */

	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
	if (!iep->cycle_time_ns)
		cyc_count /= iep->clk_tick_time;
	iep->slow_cmp_count = cyc_count;

	/* iep->clk_tick_time is def_inc */
	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
	icss_iep_set_compensation_inc(iep, cmp_inc);
	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);

	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		ns = icss_iep_gettime(iep, NULL);
		ns += delta;
		icss_iep_settime(iep, ns);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = icss_iep_gettime(iep, sts);
	*ts = ns_to_timespec64(ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = timespec64_to_ns(ts);
	icss_iep_settime(iep, ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;
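	/* Illustration: with p_ns = 1e9 and start_ns = 2.3e9, the rounding
	 * above computes do_div(3299999999, 1e9), leaving quotient 3 in
	 * start_ns and remainder 299999999 in offset, so the multiplication
	 * yields 3e9: the next full period boundary.
	 */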

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}

static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	int ret;
	u64 cmp;

	if (iep->ops && iep->ops->perout_enable) {
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		if (on) {
			/* Configure CMP */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
			/* Configure SYNC, 1ms pulse width */
			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
		}
	} else {
		if (on) {
			u64 start_ns;

			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
				      req->period.nsec;
			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
				   + req->period.nsec;
			icss_iep_update_to_next_boundary(iep, start_ns);

			/* Enable Sync in single shot mode */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear CMP regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

			/* Disable sync */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
		}
	}

	return 0;
}

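/* Periodic output is not currently exposed to userspace; PTP_CLK_REQ_PEROUT
 * requests are refused here while the hardware path above stays available to
 * the internal PPS support.
 */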
static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	return -EOPNOTSUPP;
}

static void icss_iep_cap_cmp_work(struct work_struct *work)
{
	struct icss_iep *iep = container_of(work, struct icss_iep, work);
	const u32 *reg_offs = iep->plat_data->reg_offs;
	struct ptp_clock_event pevent;
	unsigned int val;
	u64 ns, ns_next;

	mutex_lock(&iep->ptp_clk_mutex);

	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
		val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
		ns |= (u64)val << 32;
	}
	/* set next event */
	ns_next = ns + iep->period;
	writel(lower_32_bits(ns_next),
	       iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns_next),
		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);

	pevent.pps_times.ts_real = ns_to_timespec64(ns);
	pevent.type = PTP_CLOCK_PPSUSR;
	pevent.index = 0;
	ptp_clock_event(iep->ptp_clock, &pevent);
	dev_dbg(iep->dev, "IEP: pps ts: %llu next: %llu\n", ns, ns_next);

	mutex_unlock(&iep->ptp_clk_mutex);
}

static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
{
	struct icss_iep *iep = (struct icss_iep *)dev_id;
	const u32 *reg_offs = iep->plat_data->reg_offs;
	unsigned int val;

	val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
	/* The driver only enables CMP1 */
	if (val & BIT(1)) {
		/* Clear the event */
		writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
		if (iep->pps_enabled || iep->perout_enabled)
			schedule_work(&iep->work);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
	struct ptp_clock_request rq;
	struct timespec64 ts;
	int ret = 0;
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->perout_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->pps_enabled == !!on)
		goto exit;

	rq.perout.index = 0;
	if (on) {
		ns = icss_iep_gettime(iep, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
	} else {
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
		if (iep->cap_cmp_irq)
			cancel_work_sync(&iep->work);
	}

	if (!ret)
		iep->pps_enabled = !!on;

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap, ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
			       struct ptp_clock_request *rq, int on)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		return icss_iep_perout_enable(iep, &rq->perout, on);
	case PTP_CLK_REQ_PPS:
		return icss_iep_pps_enable(iep, on);
	case PTP_CLK_REQ_EXTTS:
		return icss_iep_extts_enable(iep, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};

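/* Sketch of client usage, assuming a "ti,iep" phandle in the client's DT
 * node (icss_iep_get()/icss_iep_get_idx() below resolve that phandle):
 *
 *	iep = icss_iep_get(client_np);
 *	if (IS_ERR(iep))
 *		return PTR_ERR(iep);
 *	ret = icss_iep_init(iep, NULL, NULL, 0);
 *	...
 *	icss_iep_exit(iep);
 *	icss_iep_put(iep);
 */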
struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
{
	struct platform_device *pdev;
	struct device_node *iep_np;
	struct icss_iep *iep;

	iep_np = of_parse_phandle(np, "ti,iep", idx);
	if (!iep_np || !of_device_is_available(iep_np))
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(iep_np);
	of_node_put(iep_np);

	if (!pdev)
		/* probably IEP not yet probed */
		return ERR_PTR(-EPROBE_DEFER);

	iep = platform_get_drvdata(pdev);
	if (!iep)
		return ERR_PTR(-EPROBE_DEFER);

	device_lock(iep->dev);
	if (iep->client_np) {
		device_unlock(iep->dev);
		dev_err(iep->dev, "IEP is already acquired by %s\n",
			iep->client_np->name);
		return ERR_PTR(-EBUSY);
	}
	iep->client_np = np;
	device_unlock(iep->dev);
	get_device(iep->dev);

	return iep;
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);

struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);

void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);

void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);

void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);

int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	} else if (iep->cap_cmp_irq) {
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);

int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);

static int icss_iep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct icss_iep *iep;
	struct clk *iep_clk;
	int ret, irq;

	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
	if (!iep)
		return -ENOMEM;

	iep->dev = dev;
	iep->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iep->base))
		return -ENODEV;

	irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
	if (irq == -EPROBE_DEFER)
		return irq;

	if (irq > 0) {
		ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
				       IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
		if (ret) {
			dev_info(iep->dev, "cap_cmp irq request failed: %d\n",
				 ret);
		} else {
			iep->cap_cmp_irq = irq;
			INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
		}
	}

	iep_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(iep_clk))
		return PTR_ERR(iep_clk);

	iep->refclk_freq = clk_get_rate(iep_clk);

	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
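	/* e.g. def_inc = 4 for a 250 MHz IEP clock, 5 for 200 MHz */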
	if (iep->def_inc > IEP_MAX_DEF_INC) {
		dev_err(dev, "Failed to set def_inc %d. IEP clock is too slow to be supported\n",
			iep->def_inc);
		return -EINVAL;
	}

	iep->plat_data = device_get_match_data(dev);
	if (!iep->plat_data)
		return -EINVAL;

	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
	if (IS_ERR(iep->map)) {
		dev_err(dev, "Failed to create regmap for IEP %ld\n",
			PTR_ERR(iep->map));
		return PTR_ERR(iep->map);
	}

	iep->ptp_info = icss_iep_ptp_info;
	mutex_init(&iep->ptp_clk_mutex);
	dev_set_drvdata(dev, iep);
	icss_iep_disable(iep);

	return 0;
}

static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
		return true;
	default:
		return false;
	}
}

static int icss_iep_regmap_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct icss_iep *iep = context;

	writel(val, iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}

static int icss_iep_regmap_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct icss_iep *iep = context;

	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}

static const struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = true,
};

static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};

static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);

static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");