1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * linux/arch/arm/plat-omap/dmtimer.c
4 *
5 * OMAP Dual-Mode Timers
6 *
7 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
8 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
9 * Thara Gopinath <thara@ti.com>
10 *
11 * dmtimer adaptation to platform_driver.
12 *
13 * Copyright (C) 2005 Nokia Corporation
14 * OMAP2 support by Juha Yrjola
15 * API improvements and OMAP2 clock framework support by Timo Teras
16 *
17 * Copyright (C) 2009 Texas Instruments
18 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
19 */
20
21#include <linux/clk.h>
22#include <linux/clk-provider.h>
23#include <linux/cpu_pm.h>
24#include <linux/module.h>
25#include <linux/io.h>
26#include <linux/device.h>
27#include <linux/err.h>
28#include <linux/pm_runtime.h>
29#include <linux/of.h>
30#include <linux/platform_device.h>
31#include <linux/platform_data/dmtimer-omap.h>
32
33#include <clocksource/timer-ti-dm.h>
34
35/*
36 * timer errata flags
37 *
38 * Errata i103/i767 impact all OMAP3/4/5 devices including AM33xx. These
39 * errata prevent us from using posted mode on these devices, unless the
40 * timer counter register is never read. For more details, please refer to
41 * the OMAP3/4/5 errata documents.
42 */
43#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
44
45/* posted mode types */
46#define OMAP_TIMER_NONPOSTED 0x00
47#define OMAP_TIMER_POSTED 0x01
48
49/* register offsets with the write pending bit encoded */
50#define WPSHIFT 16
51
52#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
53 | (WP_NONE << WPSHIFT))
54
55#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
56 | (WP_TCLR << WPSHIFT))
57
58#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
59 | (WP_TCRR << WPSHIFT))
60
61#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
62 | (WP_TLDR << WPSHIFT))
63
64#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
65 | (WP_TTGR << WPSHIFT))
66
67#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
68 | (WP_NONE << WPSHIFT))
69
70#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
71 | (WP_TMAR << WPSHIFT))
72
73#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
74 | (WP_NONE << WPSHIFT))
75
76#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
77 | (WP_NONE << WPSHIFT))
78
79#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
80 | (WP_NONE << WPSHIFT))
81
82#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
83 | (WP_TPIR << WPSHIFT))
84
85#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
86 | (WP_TNIR << WPSHIFT))
87
88#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
89 | (WP_TCVR << WPSHIFT))
90
91#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
92 (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
93
94#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
95 (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
96
97struct timer_regs {
98 u32 ocp_cfg;
99 u32 tidr;
100 u32 tier;
101 u32 twer;
102 u32 tclr;
103 u32 tcrr;
104 u32 tldr;
105 u32 ttrg;
106 u32 twps;
107 u32 tmar;
108 u32 tcar1;
109 u32 tsicr;
110 u32 tcar2;
111 u32 tpir;
112 u32 tnir;
113 u32 tcvr;
114 u32 tocr;
115 u32 towr;
116};
117
118struct dmtimer {
119 struct omap_dm_timer cookie;
120 int id;
121 int irq;
122 struct clk *fclk;
123
124 void __iomem *io_base;
125 int irq_stat; /* TISR/IRQSTATUS interrupt status */
126 int irq_ena; /* irq enable */
127 int irq_dis; /* irq disable, only on v2 ip */
128 void __iomem *pend; /* write pending */
129 void __iomem *func_base; /* function register base */
130
131 atomic_t enabled;
132 unsigned long rate;
133 unsigned reserved:1;
134 unsigned posted:1;
135 unsigned omap1:1;
136 struct timer_regs context;
137 int revision;
138 u32 capability;
139 u32 errata;
140 struct platform_device *pdev;
141 struct list_head node;
142 struct notifier_block nb;
143 struct notifier_block fclk_nb;
144 unsigned long fclk_rate;
145};
146
147static u32 omap_reserved_systimers;
148static LIST_HEAD(omap_timer_list);
149static DEFINE_SPINLOCK(dm_timer_lock);
150
151enum {
152 REQUEST_ANY = 0,
153 REQUEST_BY_ID,
154 REQUEST_BY_CAP,
155 REQUEST_BY_NODE,
156};
157
158/**
159 * dmtimer_read - read timer registers in posted and non-posted mode
160 * @timer: timer pointer on which the read operation is performed
161 * @reg: lowest byte holds the register offset
162 *
163 * The posted mode bit is encoded in reg. Note that in posted mode the write
164 * pending bit must be checked; otherwise a read issued while a write is
165 * still pending will produce an error.
166 */
167static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
168{
169 u16 wp, offset;
170
171 wp = reg >> WPSHIFT;
172 offset = reg & 0xff;
173
174 /* Wait for a possible write pending bit in posted mode */
175 if (wp && timer->posted)
176 while (readl_relaxed(timer->pend) & wp)
177 cpu_relax();
178
179 return readl_relaxed(timer->func_base + offset);
180}
181
182/**
183 * dmtimer_write - write timer registers in posted and non-posted mode
184 * @timer: timer pointer on which the write operation is performed
185 * @reg: lowest byte holds the register offset
186 * @val: data to write into the register
187 *
188 * The posted mode bit is encoded in reg. Note that in posted mode the write
189 * pending bit must be checked; otherwise a write to a register that already
190 * has a pending write will be lost.
191 */
192static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
193{
194 u16 wp, offset;
195
196 wp = reg >> WPSHIFT;
197 offset = reg & 0xff;
198
199 /* Wait for a possible write pending bit in posted mode */
200 if (wp && timer->posted)
201 while (readl_relaxed(timer->pend) & wp)
202 cpu_relax();
203
204 writel_relaxed(val, timer->func_base + offset);
205}
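/*
 * Illustrative expansion of the encoding above (no new definitions, just the
 * existing macros traced through dmtimer_write()): OMAP_TIMER_CTRL_REG is
 * (_OMAP_TIMER_CTRL_OFFSET | (WP_TCLR << WPSHIFT)), so a call such as
 *
 *	dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
 *
 * spins on the WP_TCLR bit of the write pending register while posted mode
 * is active (wp = reg >> WPSHIFT) and then writes to
 * timer->func_base + (reg & 0xff).
 */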
206
207static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
208{
209 u32 tidr;
210
211 /* Assume v1 ip if bits [31:16] are zero */
212 tidr = readl_relaxed(timer->io_base);
213 if (!(tidr >> 16)) {
214 timer->revision = 1;
215 timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
216 timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
217 timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
218 timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
219 timer->func_base = timer->io_base;
220 } else {
221 timer->revision = 2;
222 timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
223 timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
224 timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
225 timer->pend = timer->io_base +
226 _OMAP_TIMER_WRITE_PEND_OFFSET +
227 OMAP_TIMER_V2_FUNC_OFFSET;
228 timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
229 }
230}
231
232/*
233 * __omap_dm_timer_enable_posted - enables write posted mode
234 * @timer: pointer to timer instance handle
235 *
236 * Enables write posted mode for the timer. When posted mode is enabled,
237 * writes to certain timer registers are immediately acknowledged by the
238 * internal bus, so the CPU is not stalled waiting for the write to
239 * complete. Enabling this feature can improve performance when writing to
240 * the timer registers.
241 */
242static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
243{
244 if (timer->posted)
245 return;
246
247 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
248 timer->posted = OMAP_TIMER_NONPOSTED;
249 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
250 return;
251 }
252
253 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
254 timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
255 timer->posted = OMAP_TIMER_POSTED;
256}
257
258static inline void __omap_dm_timer_stop(struct dmtimer *timer)
259{
260 u32 l;
261
262 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
263 if (l & OMAP_TIMER_CTRL_ST) {
264 l &= ~0x1;
265 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
266#ifdef CONFIG_ARCH_OMAP2PLUS
267 /* Readback to make sure write has completed */
268 dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
269 /*
270 * Wait for functional clock period x 3.5 to make sure that
271 * timer is stopped
272 */
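		/*
		 * Worked example (illustrative): with a 32768 Hz functional
		 * clock the delay below evaluates to
		 * 3500000 / 32768 + 1 = 107 us, i.e. just over 3.5 counter
		 * clock periods.
		 */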
273 udelay(3500000 / timer->fclk_rate + 1);
274#endif
275 }
276
277 /* Ack possibly pending interrupt */
278 dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
279}
280
281static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
282 unsigned int value)
283{
284 dmtimer_write(timer, timer->irq_ena, value);
285 dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
286}
287
288static inline unsigned int
289__omap_dm_timer_read_counter(struct dmtimer *timer)
290{
291 return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
292}
293
294static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
295 unsigned int value)
296{
297 dmtimer_write(timer, timer->irq_stat, value);
298}
299
300static void omap_timer_restore_context(struct dmtimer *timer)
301{
302 dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
303
304 dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
305 dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
306 dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
307 dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
308 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
309 dmtimer_write(timer, timer->irq_ena, timer->context.tier);
310 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
311}
312
313static void omap_timer_save_context(struct dmtimer *timer)
314{
315 timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
316
317 timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
318 timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
319 timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
320 timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
321 timer->context.tier = dmtimer_read(timer, timer->irq_ena);
322 timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
323}
324
325static int omap_timer_context_notifier(struct notifier_block *nb,
326 unsigned long cmd, void *v)
327{
328 struct dmtimer *timer;
329
330 timer = container_of(nb, struct dmtimer, nb);
331
332 switch (cmd) {
333 case CPU_CLUSTER_PM_ENTER:
334 if ((timer->capability & OMAP_TIMER_ALWON) ||
335 !atomic_read(&timer->enabled))
336 break;
337 omap_timer_save_context(timer);
338 break;
339 case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
340 break;
341 case CPU_CLUSTER_PM_EXIT:
342 if ((timer->capability & OMAP_TIMER_ALWON) ||
343 !atomic_read(&timer->enabled))
344 break;
345 omap_timer_restore_context(timer);
346 break;
347 }
348
349 return NOTIFY_OK;
350}
351
352static int omap_timer_fclk_notifier(struct notifier_block *nb,
353 unsigned long event, void *data)
354{
355 struct clk_notifier_data *clk_data = data;
356 struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
357
358 switch (event) {
359 case POST_RATE_CHANGE:
360 timer->fclk_rate = clk_data->new_rate;
361 return NOTIFY_OK;
362 default:
363 return NOTIFY_DONE;
364 }
365}
366
367static int omap_dm_timer_reset(struct dmtimer *timer)
368{
369 u32 l, timeout = 100000;
370
371 if (timer->revision != 1)
372 return -EINVAL;
373
374 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
375
376 do {
377 l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
378 } while (!l && timeout--);
379
380 if (!timeout) {
381 dev_err(&timer->pdev->dev, "Timer failed to reset\n");
382 return -ETIMEDOUT;
383 }
384
385 /* Configure timer for smart-idle mode */
386 l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
387 l |= 0x2 << 0x3;
388 dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
389
390 timer->posted = 0;
391
392 return 0;
393}
394
395/*
396 * Functions exposed to PWM and remoteproc drivers via platform_data.
397 * Do not use these in the driver; they will be deprecated and replaced
398 * by Linux generic framework functions such as chained interrupts and
399 * the clock framework (see the illustrative consumer sketch below).
400 */
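/*
 * Illustrative consumer sketch (assumptions: a client platform device
 * "client_pdev" whose device-tree node carries a "ti,timers" phandle, along
 * the lines of how the OMAP dmtimer PWM driver consumes these ops; not part
 * of this driver):
 *
 *	struct device_node *np;
 *	struct platform_device *timer_pdev;
 *	const struct dmtimer_platform_data *tpdata;
 *	const struct omap_dm_timer_ops *ops;
 *	struct omap_dm_timer *dm_timer;
 *
 *	np = of_parse_phandle(client_pdev->dev.of_node, "ti,timers", 0);
 *	timer_pdev = of_find_device_by_node(np);
 *	tpdata = dev_get_platdata(&timer_pdev->dev);
 *	ops = tpdata->timer_ops;
 *
 *	dm_timer = ops->request_by_node(np);
 *	if (dm_timer) {
 *		ops->set_source(dm_timer, OMAP_TIMER_SRC_SYS_CLK);
 *		ops->start(dm_timer);
 *	}
 */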
401static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
402{
403 if (!cookie)
404 return NULL;
405
406 return container_of(cookie, struct dmtimer, cookie);
407}
408
409static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
410{
411 int ret;
412 const char *parent_name;
413 struct clk *parent;
414 struct dmtimer_platform_data *pdata;
415 struct dmtimer *timer;
416
417 timer = to_dmtimer(cookie);
418 if (unlikely(!timer) || IS_ERR(timer->fclk))
419 return -EINVAL;
420
421 switch (source) {
422 case OMAP_TIMER_SRC_SYS_CLK:
423 parent_name = "timer_sys_ck";
424 break;
425 case OMAP_TIMER_SRC_32_KHZ:
426 parent_name = "timer_32k_ck";
427 break;
428 case OMAP_TIMER_SRC_EXT_CLK:
429 parent_name = "timer_ext_ck";
430 break;
431 default:
432 return -EINVAL;
433 }
434
435 pdata = timer->pdev->dev.platform_data;
436
437 /*
438 * FIXME: Used for OMAP1 devices only because they do not currently
439 * use the clock framework to set the parent clock. To be removed
440 * once OMAP1 migrated to using clock framework for dmtimers
441 */
442 if (timer->omap1 && pdata && pdata->set_timer_src)
443 return pdata->set_timer_src(timer->pdev, source);
444
445#if defined(CONFIG_COMMON_CLK)
446 /* Check if the clock has configurable parents */
447 if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
448 return 0;
449#endif
450
451 parent = clk_get(&timer->pdev->dev, parent_name);
452 if (IS_ERR(parent)) {
453 pr_err("%s: %s not found\n", __func__, parent_name);
454 return -EINVAL;
455 }
456
457 ret = clk_set_parent(timer->fclk, parent);
458 if (ret < 0)
459 pr_err("%s: failed to set %s as parent\n", __func__,
460 parent_name);
461
462 clk_put(parent);
463
464 return ret;
465}
466
467static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
468{
469 struct dmtimer *timer = to_dmtimer(cookie);
470 struct device *dev = &timer->pdev->dev;
471 int rc;
472
473 rc = pm_runtime_resume_and_get(dev);
474 if (rc)
475 dev_err(dev, "could not enable timer\n");
476}
477
478static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
479{
480 struct dmtimer *timer = to_dmtimer(cookie);
481 struct device *dev = &timer->pdev->dev;
482
483 pm_runtime_put_sync(dev);
484}
485
486static int omap_dm_timer_prepare(struct dmtimer *timer)
487{
488 struct device *dev = &timer->pdev->dev;
489 int rc;
490
491 rc = pm_runtime_resume_and_get(dev);
492 if (rc)
493 return rc;
494
495 if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
496 rc = omap_dm_timer_reset(timer);
497 if (rc) {
498 pm_runtime_put_sync(dev);
499 return rc;
500 }
501 }
502
503 __omap_dm_timer_enable_posted(timer);
504 pm_runtime_put_sync(dev);
505
506 return 0;
507}
508
509static inline u32 omap_dm_timer_reserved_systimer(int id)
510{
511 return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
512}
513
514static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
515{
516 struct dmtimer *timer = NULL, *t;
517 struct device_node *np = NULL;
518 unsigned long flags;
519 u32 cap = 0;
520 int id = 0;
521
522 switch (req_type) {
523 case REQUEST_BY_ID:
524 id = *(int *)data;
525 break;
526 case REQUEST_BY_CAP:
527 cap = *(u32 *)data;
528 break;
529 case REQUEST_BY_NODE:
530 np = (struct device_node *)data;
531 break;
532 default:
533 /* REQUEST_ANY */
534 break;
535 }
536
537 spin_lock_irqsave(&dm_timer_lock, flags);
538 list_for_each_entry(t, &omap_timer_list, node) {
539 if (t->reserved)
540 continue;
541
542 switch (req_type) {
543 case REQUEST_BY_ID:
544 if (id == t->pdev->id) {
545 timer = t;
546 timer->reserved = 1;
547 goto found;
548 }
549 break;
550 case REQUEST_BY_CAP:
551 if (cap == (t->capability & cap)) {
552 /*
553 * If timer is not NULL, we have already found
554 * one timer. But it was not an exact match
555 * because it had more capabilities than what
556 * was required. Therefore, unreserve the last
557 * timer found and see if this one is a better
558 * match.
559 */
560 if (timer)
561 timer->reserved = 0;
562 timer = t;
563 timer->reserved = 1;
564
565 /* Exit loop early if we find an exact match */
566 if (t->capability == cap)
567 goto found;
568 }
569 break;
570 case REQUEST_BY_NODE:
571 if (np == t->pdev->dev.of_node) {
572 timer = t;
573 timer->reserved = 1;
574 goto found;
575 }
576 break;
577 default:
578 /* REQUEST_ANY */
579 timer = t;
580 timer->reserved = 1;
581 goto found;
582 }
583 }
584found:
585 spin_unlock_irqrestore(&dm_timer_lock, flags);
586
587 if (timer && omap_dm_timer_prepare(timer)) {
588 timer->reserved = 0;
589 timer = NULL;
590 }
591
592 if (!timer)
593 pr_debug("%s: timer request failed!\n", __func__);
594
595 return timer;
596}
597
598static struct omap_dm_timer *omap_dm_timer_request(void)
599{
600 struct dmtimer *timer;
601
602 timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
603 if (!timer)
604 return NULL;
605
606 return &timer->cookie;
607}
608
609static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
610{
611 struct dmtimer *timer;
612
613 /* Requesting timer by ID is not supported when device tree is used */
614 if (of_have_populated_dt()) {
615 pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
616 __func__);
617 return NULL;
618 }
619
620 timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
621 if (!timer)
622 return NULL;
623
624 return &timer->cookie;
625}
626
627/**
628 * omap_dm_timer_request_by_node - Request a timer by device-tree node
629 * @np: Pointer to device-tree timer node
630 *
631 * Request a timer based upon a device node pointer. Returns pointer to
632 * timer handle on success and a NULL pointer on failure.
633 */
634static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
635{
636 struct dmtimer *timer;
637
638 if (!np)
639 return NULL;
640
641 timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
642 if (!timer)
643 return NULL;
644
645 return &timer->cookie;
646}
647
648static int omap_dm_timer_free(struct omap_dm_timer *cookie)
649{
650 struct dmtimer *timer;
651 struct device *dev;
652 int rc;
653
654 timer = to_dmtimer(cookie);
655 if (unlikely(!timer))
656 return -EINVAL;
657
658 WARN_ON(!timer->reserved);
659 timer->reserved = 0;
660
661 dev = &timer->pdev->dev;
662 rc = pm_runtime_resume_and_get(dev);
663 if (rc)
664 return rc;
665
666 /* Clear timer configuration */
667 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);
668
669 pm_runtime_put_sync(dev);
670
671 return 0;
672}
673
674static int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
675{
676 struct dmtimer *timer = to_dmtimer(cookie);
677 if (timer)
678 return timer->irq;
679 return -EINVAL;
680}
681
682#if defined(CONFIG_ARCH_OMAP1)
683#include <linux/soc/ti/omap1-io.h>
684
685static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
686{
687 return NULL;
688}
689
690/**
691 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
692 * @inputmask: current value of idlect mask
693 */
694__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
695{
696 int i = 0;
697 struct dmtimer *timer = NULL;
698 unsigned long flags;
699
700 /* If ARMXOR cannot be idled this function call is unnecessary */
701 if (!(inputmask & (1 << 1)))
702 return inputmask;
703
704 /* If any active timer is using ARMXOR return modified mask */
705 spin_lock_irqsave(&dm_timer_lock, flags);
706 list_for_each_entry(timer, &omap_timer_list, node) {
707 u32 l;
708
709 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
710 if (l & OMAP_TIMER_CTRL_ST) {
711 if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
712 inputmask &= ~(1 << 1);
713 else
714 inputmask &= ~(1 << 2);
715 }
716 i++;
717 }
718 spin_unlock_irqrestore(&dm_timer_lock, flags);
719
720 return inputmask;
721}
722
723#else
724
725static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
726{
727 struct dmtimer *timer = to_dmtimer(cookie);
728
729 if (timer && !IS_ERR(timer->fclk))
730 return timer->fclk;
731 return NULL;
732}
733
734__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
735{
736 BUG();
737
738 return 0;
739}
740
741#endif
742
743static int omap_dm_timer_start(struct omap_dm_timer *cookie)
744{
745 struct dmtimer *timer;
746 struct device *dev;
747 int rc;
748 u32 l;
749
750 timer = to_dmtimer(cookie);
751 if (unlikely(!timer))
752 return -EINVAL;
753
754 dev = &timer->pdev->dev;
755
756 rc = pm_runtime_resume_and_get(dev);
757 if (rc)
758 return rc;
759
760 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
761 if (!(l & OMAP_TIMER_CTRL_ST)) {
762 l |= OMAP_TIMER_CTRL_ST;
763 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
764 }
765
766 return 0;
767}
768
769static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
770{
771 struct dmtimer *timer;
772 struct device *dev;
773
774 timer = to_dmtimer(cookie);
775 if (unlikely(!timer))
776 return -EINVAL;
777
778 dev = &timer->pdev->dev;
779
780 __omap_dm_timer_stop(timer);
781
782 pm_runtime_put_sync(dev);
783
784 return 0;
785}
786
787static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
788 unsigned int load)
789{
790 struct dmtimer *timer;
791 struct device *dev;
792 int rc;
793
794 timer = to_dmtimer(cookie);
795 if (unlikely(!timer))
796 return -EINVAL;
797
798 dev = &timer->pdev->dev;
799 rc = pm_runtime_resume_and_get(dev);
800 if (rc)
801 return rc;
802
803 dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
804
805 pm_runtime_put_sync(dev);
806
807 return 0;
808}
809
810static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
811 unsigned int match)
812{
813 struct dmtimer *timer;
814 struct device *dev;
815 int rc;
816 u32 l;
817
818 timer = to_dmtimer(cookie);
819 if (unlikely(!timer))
820 return -EINVAL;
821
822 dev = &timer->pdev->dev;
823 rc = pm_runtime_resume_and_get(dev);
824 if (rc)
825 return rc;
826
827 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
828 if (enable)
829 l |= OMAP_TIMER_CTRL_CE;
830 else
831 l &= ~OMAP_TIMER_CTRL_CE;
832 dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
833 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
834
835 pm_runtime_put_sync(dev);
836
837 return 0;
838}
839
840static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
841 int toggle, int trigger, int autoreload)
842{
843 struct dmtimer *timer;
844 struct device *dev;
845 int rc;
846 u32 l;
847
848 timer = to_dmtimer(cookie);
849 if (unlikely(!timer))
850 return -EINVAL;
851
852 dev = &timer->pdev->dev;
853 rc = pm_runtime_resume_and_get(dev);
854 if (rc)
855 return rc;
856
857 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
858 l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
859 OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
860 if (def_on)
861 l |= OMAP_TIMER_CTRL_SCPWM;
862 if (toggle)
863 l |= OMAP_TIMER_CTRL_PT;
864 l |= trigger << 10;
865 if (autoreload)
866 l |= OMAP_TIMER_CTRL_AR;
867 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
868
869 pm_runtime_put_sync(dev);
870
871 return 0;
872}
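/*
 * Illustrative duty-cycle arithmetic (assumption: a consumer in the style of
 * the OMAP dmtimer PWM driver, with the counter auto-reloading from TLDR up
 * to 0xffffffff): for a period of period_cycles and an active time of
 * duty_cycles, both in functional clock ticks,
 *
 *	load  = 0xffffffff - period_cycles + 1;
 *	match = load + duty_cycles - 1;
 *
 * are programmed through set_load()/set_match() before set_pwm() selects the
 * trigger, polarity and autoreload bits.
 */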
873
874static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
875{
876 struct dmtimer *timer;
877 struct device *dev;
878 int rc;
879 u32 l;
880
881 timer = to_dmtimer(cookie);
882 if (unlikely(!timer))
883 return -EINVAL;
884
885 dev = &timer->pdev->dev;
886 rc = pm_runtime_resume_and_get(dev);
887 if (rc)
888 return rc;
889
890 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
891
892 pm_runtime_put_sync(dev);
893
894 return l;
895}
896
897static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
898 int prescaler)
899{
900 struct dmtimer *timer;
901 struct device *dev;
902 int rc;
903 u32 l;
904
905 timer = to_dmtimer(cookie);
906 if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
907 return -EINVAL;
908
909 dev = &timer->pdev->dev;
910 rc = pm_runtime_resume_and_get(dev);
911 if (rc)
912 return rc;
913
914 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
915 l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
916 if (prescaler >= 0) {
917 l |= OMAP_TIMER_CTRL_PRE;
918 l |= prescaler << 2;
919 }
920 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
921
922 pm_runtime_put_sync(dev);
923
924 return 0;
925}
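/*
 * Illustrative prescaler effect (stated here as an assumption from the
 * dual-mode timer TRM description of the PTV field): a prescaler value n in
 * the range 0..7 divides the functional clock by 2^(n + 1), so 0 halves the
 * tick rate and 7 divides it by 256, while prescaler == -1 disables the
 * prescaler (divide by 1).
 */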
926
927static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
928 unsigned int value)
929{
930 struct dmtimer *timer;
931 struct device *dev;
932 int rc;
933
934 timer = to_dmtimer(cookie);
935 if (unlikely(!timer))
936 return -EINVAL;
937
938 dev = &timer->pdev->dev;
939 rc = pm_runtime_resume_and_get(dev);
940 if (rc)
941 return rc;
942
943 __omap_dm_timer_int_enable(timer, value);
944
945 pm_runtime_put_sync(dev);
946
947 return 0;
948}
949
950/**
951 * omap_dm_timer_set_int_disable - disable timer interrupts
952 * @cookie: pointer to timer cookie
953 * @mask: bit mask of interrupts to be disabled
954 *
955 * Disables the specified timer interrupts for a timer.
956 */
957static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
958{
959 struct dmtimer *timer;
960 struct device *dev;
961 u32 l = mask;
962 int rc;
963
964 timer = to_dmtimer(cookie);
965 if (unlikely(!timer))
966 return -EINVAL;
967
968 dev = &timer->pdev->dev;
969 rc = pm_runtime_resume_and_get(dev);
970 if (rc)
971 return rc;
972
973 if (timer->revision == 1)
974 l = dmtimer_read(timer, timer->irq_ena) & ~mask;
975
976 dmtimer_write(timer, timer->irq_dis, l);
977 l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
978 dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
979
980 pm_runtime_put_sync(dev);
981
982 return 0;
983}
984
985static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
986{
987 struct dmtimer *timer;
988 unsigned int l;
989
990 timer = to_dmtimer(cookie);
991 if (unlikely(!timer || !atomic_read(&timer->enabled))) {
992 pr_err("%s: timer not available or enabled.\n", __func__);
993 return 0;
994 }
995
996 l = dmtimer_read(timer, timer->irq_stat);
997
998 return l;
999}
1000
1001static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
1002{
1003 struct dmtimer *timer;
1004
1005 timer = to_dmtimer(cookie);
1006 if (unlikely(!timer || !atomic_read(&timer->enabled)))
1007 return -EINVAL;
1008
1009 __omap_dm_timer_write_status(timer, value);
1010
1011 return 0;
1012}
1013
1014static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
1015{
1016 struct dmtimer *timer;
1017
1018 timer = to_dmtimer(cookie);
1019 if (unlikely(!timer || !atomic_read(&timer->enabled))) {
1020 pr_err("%s: timer not available or enabled.\n", __func__);
1021 return 0;
1022 }
1023
1024 return __omap_dm_timer_read_counter(timer);
1025}
1026
1027static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
1028{
1029 struct dmtimer *timer;
1030
1031 timer = to_dmtimer(cookie);
1032 if (unlikely(!timer || !atomic_read(&timer->enabled))) {
1033 pr_err("%s: timer not available or enabled.\n", __func__);
1034 return -EINVAL;
1035 }
1036
1037 dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
1038
1039 /* Save the context */
1040 timer->context.tcrr = value;
1041 return 0;
1042}
1043
1044static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
1045{
1046 struct dmtimer *timer = dev_get_drvdata(dev);
1047
1048 atomic_set(&timer->enabled, 0);
1049
1050 if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base)
1051 return 0;
1052
1053 omap_timer_save_context(timer);
1054
1055 return 0;
1056}
1057
1058static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
1059{
1060 struct dmtimer *timer = dev_get_drvdata(dev);
1061
1062 if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
1063 omap_timer_restore_context(timer);
1064
1065 atomic_set(&timer->enabled, 1);
1066
1067 return 0;
1068}
1069
1070static const struct dev_pm_ops omap_dm_timer_pm_ops = {
1071 SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend,
1072 omap_dm_timer_runtime_resume, NULL)
1073};
1074
1075static const struct of_device_id omap_timer_match[];
1076
1077/**
1078 * omap_dm_timer_probe - probe function called for every registered device
1079 * @pdev: pointer to current timer platform device
1080 *
1081 * Called by driver framework at the end of device registration for all
1082 * timer devices.
1083 */
1084static int omap_dm_timer_probe(struct platform_device *pdev)
1085{
1086 unsigned long flags;
1087 struct dmtimer *timer;
1088 struct device *dev = &pdev->dev;
1089 const struct dmtimer_platform_data *pdata;
1090 int ret;
1091
1092 pdata = of_device_get_match_data(dev);
1093 if (!pdata)
1094 pdata = dev_get_platdata(dev);
1095 else
1096 dev->platform_data = (void *)pdata;
1097
1098 if (!pdata) {
1099 dev_err(dev, "%s: no platform data.\n", __func__);
1100 return -ENODEV;
1101 }
1102
1103 timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
1104 if (!timer)
1105 return -ENOMEM;
1106
1107 timer->irq = platform_get_irq(pdev, 0);
1108 if (timer->irq < 0)
1109 return timer->irq;
1110
1111 timer->io_base = devm_platform_ioremap_resource(pdev, 0);
1112 if (IS_ERR(timer->io_base))
1113 return PTR_ERR(timer->io_base);
1114
1115 platform_set_drvdata(pdev, timer);
1116
1117 if (dev->of_node) {
1118 if (of_property_read_bool(dev->of_node, "ti,timer-alwon"))
1119 timer->capability |= OMAP_TIMER_ALWON;
1120 if (of_property_read_bool(dev->of_node, "ti,timer-dsp"))
1121 timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
1122 if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
1123 timer->capability |= OMAP_TIMER_HAS_PWM;
1124 if (of_property_read_bool(dev->of_node, "ti,timer-secure"))
1125 timer->capability |= OMAP_TIMER_SECURE;
1126 } else {
1127 timer->id = pdev->id;
1128 timer->capability = pdata->timer_capability;
1129 timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
1130 }
1131
1132 timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;
1133
1134 /* OMAP1 devices do not yet use the clock framework for dmtimers */
1135 if (!timer->omap1) {
1136 timer->fclk = devm_clk_get(dev, "fck");
1137 if (IS_ERR(timer->fclk))
1138 return PTR_ERR(timer->fclk);
1139
1140 timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
1141 ret = devm_clk_notifier_register(dev, timer->fclk,
1142 &timer->fclk_nb);
1143 if (ret)
1144 return ret;
1145
1146 timer->fclk_rate = clk_get_rate(timer->fclk);
1147 } else {
1148 timer->fclk = ERR_PTR(-ENODEV);
1149 }
1150
1151 if (!(timer->capability & OMAP_TIMER_ALWON)) {
1152 timer->nb.notifier_call = omap_timer_context_notifier;
1153 cpu_pm_register_notifier(&timer->nb);
1154 }
1155
1156 timer->errata = pdata->timer_errata;
1157
1158 timer->pdev = pdev;
1159
1160 pm_runtime_enable(dev);
1161
1162 if (!timer->reserved) {
1163 ret = pm_runtime_resume_and_get(dev);
1164 if (ret) {
1165 dev_err(dev, "%s: pm_runtime_resume_and_get failed!\n",
1166 __func__);
1167 goto err_disable;
1168 }
1169 __omap_dm_timer_init_regs(timer);
1170
1171 /* Clear timer configuration */
1172 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);
1173
1174 pm_runtime_put(dev);
1175 }
1176
1177 /* add the timer element to the list */
1178 spin_lock_irqsave(&dm_timer_lock, flags);
1179 list_add_tail(&timer->node, &omap_timer_list);
1180 spin_unlock_irqrestore(&dm_timer_lock, flags);
1181
1182 dev_dbg(dev, "Device Probed.\n");
1183
1184 return 0;
1185
1186err_disable:
1187 pm_runtime_disable(dev);
1188 return ret;
1189}
1190
1191/**
1192 * omap_dm_timer_remove - cleanup a registered timer device
1193 * @pdev: pointer to current timer platform device
1194 *
1195 * Called by driver framework whenever a timer device is unregistered.
1196 * In addition to freeing platform resources it also deletes the timer
1197 * entry from the local list.
1198 */
1199static void omap_dm_timer_remove(struct platform_device *pdev)
1200{
1201 struct dmtimer *timer;
1202 unsigned long flags;
1203 int ret = -EINVAL;
1204
1205 spin_lock_irqsave(&dm_timer_lock, flags);
1206 list_for_each_entry(timer, &omap_timer_list, node)
1207 if (!strcmp(dev_name(&timer->pdev->dev),
1208 dev_name(&pdev->dev))) {
1209 if (!(timer->capability & OMAP_TIMER_ALWON))
1210 cpu_pm_unregister_notifier(&timer->nb);
1211 list_del(&timer->node);
1212 ret = 0;
1213 break;
1214 }
1215 spin_unlock_irqrestore(&dm_timer_lock, flags);
1216
1217 pm_runtime_disable(&pdev->dev);
1218
1219 if (ret)
1220 dev_err(&pdev->dev, "Unable to determine timer entry in list of drivers on remove\n");
1221}
1222
1223static const struct omap_dm_timer_ops dmtimer_ops = {
1224 .request_by_node = omap_dm_timer_request_by_node,
1225 .request_specific = omap_dm_timer_request_specific,
1226 .request = omap_dm_timer_request,
1227 .set_source = omap_dm_timer_set_source,
1228 .get_irq = omap_dm_timer_get_irq,
1229 .set_int_enable = omap_dm_timer_set_int_enable,
1230 .set_int_disable = omap_dm_timer_set_int_disable,
1231 .free = omap_dm_timer_free,
1232 .enable = omap_dm_timer_enable,
1233 .disable = omap_dm_timer_disable,
1234 .get_fclk = omap_dm_timer_get_fclk,
1235 .start = omap_dm_timer_start,
1236 .stop = omap_dm_timer_stop,
1237 .set_load = omap_dm_timer_set_load,
1238 .set_match = omap_dm_timer_set_match,
1239 .set_pwm = omap_dm_timer_set_pwm,
1240 .get_pwm_status = omap_dm_timer_get_pwm_status,
1241 .set_prescaler = omap_dm_timer_set_prescaler,
1242 .read_counter = omap_dm_timer_read_counter,
1243 .write_counter = omap_dm_timer_write_counter,
1244 .read_status = omap_dm_timer_read_status,
1245 .write_status = omap_dm_timer_write_status,
1246};
1247
1248static const struct dmtimer_platform_data omap3plus_pdata = {
1249 .timer_errata = OMAP_TIMER_ERRATA_I103_I767,
1250 .timer_ops = &dmtimer_ops,
1251};
1252
1253static const struct dmtimer_platform_data am6_pdata = {
1254 .timer_ops = &dmtimer_ops,
1255};
1256
1257static const struct of_device_id omap_timer_match[] = {
1258 {
1259 .compatible = "ti,omap2420-timer",
1260 },
1261 {
1262 .compatible = "ti,omap3430-timer",
1263 .data = &omap3plus_pdata,
1264 },
1265 {
1266 .compatible = "ti,omap4430-timer",
1267 .data = &omap3plus_pdata,
1268 },
1269 {
1270 .compatible = "ti,omap5430-timer",
1271 .data = &omap3plus_pdata,
1272 },
1273 {
1274 .compatible = "ti,am335x-timer",
1275 .data = &omap3plus_pdata,
1276 },
1277 {
1278 .compatible = "ti,am335x-timer-1ms",
1279 .data = &omap3plus_pdata,
1280 },
1281 {
1282 .compatible = "ti,dm816-timer",
1283 .data = &omap3plus_pdata,
1284 },
1285 {
1286 .compatible = "ti,am654-timer",
1287 .data = &am6_pdata,
1288 },
1289 {},
1290};
1291MODULE_DEVICE_TABLE(of, omap_timer_match);
1292
1293static struct platform_driver omap_dm_timer_driver = {
1294 .probe = omap_dm_timer_probe,
1295 .remove_new = omap_dm_timer_remove,
1296 .driver = {
1297 .name = "omap_timer",
1298 .of_match_table = omap_timer_match,
1299 .pm = &omap_dm_timer_pm_ops,
1300 },
1301};
1302
1303module_platform_driver(omap_dm_timer_driver);
1304
1305MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
1306MODULE_AUTHOR("Texas Instruments Inc");
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * linux/arch/arm/plat-omap/dmtimer.c
4 *
5 * OMAP Dual-Mode Timers
6 *
7 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
8 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
9 * Thara Gopinath <thara@ti.com>
10 *
11 * dmtimer adaptation to platform_driver.
12 *
13 * Copyright (C) 2005 Nokia Corporation
14 * OMAP2 support by Juha Yrjola
15 * API improvements and OMAP2 clock framework support by Timo Teras
16 *
17 * Copyright (C) 2009 Texas Instruments
18 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
19 */
20
21#include <linux/clk.h>
22#include <linux/clk-provider.h>
23#include <linux/cpu_pm.h>
24#include <linux/module.h>
25#include <linux/io.h>
26#include <linux/device.h>
27#include <linux/err.h>
28#include <linux/pm_runtime.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/platform_device.h>
32#include <linux/platform_data/dmtimer-omap.h>
33
34#include <clocksource/timer-ti-dm.h>
35
36/*
37 * timer errata flags
38 *
39 * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
40 * errata prevents us from using posted mode on these devices, unless the
41 * timer counter register is never read. For more details please refer to
42 * the OMAP3/4/5 errata documents.
43 */
44#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
45
46/* posted mode types */
47#define OMAP_TIMER_NONPOSTED 0x00
48#define OMAP_TIMER_POSTED 0x01
49
50/* register offsets with the write pending bit encoded */
51#define WPSHIFT 16
52
53#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
54 | (WP_NONE << WPSHIFT))
55
56#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
57 | (WP_TCLR << WPSHIFT))
58
59#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
60 | (WP_TCRR << WPSHIFT))
61
62#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
63 | (WP_TLDR << WPSHIFT))
64
65#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
66 | (WP_TTGR << WPSHIFT))
67
68#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
69 | (WP_NONE << WPSHIFT))
70
71#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
72 | (WP_TMAR << WPSHIFT))
73
74#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
75 | (WP_NONE << WPSHIFT))
76
77#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
78 | (WP_NONE << WPSHIFT))
79
80#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
81 | (WP_NONE << WPSHIFT))
82
83#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
84 | (WP_TPIR << WPSHIFT))
85
86#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
87 | (WP_TNIR << WPSHIFT))
88
89#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
90 | (WP_TCVR << WPSHIFT))
91
92#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
93 (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
94
95#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
96 (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
97
98struct timer_regs {
99 u32 ocp_cfg;
100 u32 tidr;
101 u32 tier;
102 u32 twer;
103 u32 tclr;
104 u32 tcrr;
105 u32 tldr;
106 u32 ttrg;
107 u32 twps;
108 u32 tmar;
109 u32 tcar1;
110 u32 tsicr;
111 u32 tcar2;
112 u32 tpir;
113 u32 tnir;
114 u32 tcvr;
115 u32 tocr;
116 u32 towr;
117};
118
119struct dmtimer {
120 struct omap_dm_timer cookie;
121 int id;
122 int irq;
123 struct clk *fclk;
124
125 void __iomem *io_base;
126 int irq_stat; /* TISR/IRQSTATUS interrupt status */
127 int irq_ena; /* irq enable */
128 int irq_dis; /* irq disable, only on v2 ip */
129 void __iomem *pend; /* write pending */
130 void __iomem *func_base; /* function register base */
131
132 atomic_t enabled;
133 unsigned long rate;
134 unsigned reserved:1;
135 unsigned posted:1;
136 unsigned omap1:1;
137 struct timer_regs context;
138 int revision;
139 u32 capability;
140 u32 errata;
141 struct platform_device *pdev;
142 struct list_head node;
143 struct notifier_block nb;
144};
145
146static u32 omap_reserved_systimers;
147static LIST_HEAD(omap_timer_list);
148static DEFINE_SPINLOCK(dm_timer_lock);
149
150enum {
151 REQUEST_ANY = 0,
152 REQUEST_BY_ID,
153 REQUEST_BY_CAP,
154 REQUEST_BY_NODE,
155};
156
157/**
158 * dmtimer_read - read timer registers in posted and non-posted mode
159 * @timer: timer pointer over which read operation to perform
160 * @reg: lowest byte holds the register offset
161 *
162 * The posted mode bit is encoded in reg. Note that in posted mode, write
163 * pending bit must be checked. Otherwise a read of a non completed write
164 * will produce an error.
165 */
166static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
167{
168 u16 wp, offset;
169
170 wp = reg >> WPSHIFT;
171 offset = reg & 0xff;
172
173 /* Wait for a possible write pending bit in posted mode */
174 if (wp && timer->posted)
175 while (readl_relaxed(timer->pend) & wp)
176 cpu_relax();
177
178 return readl_relaxed(timer->func_base + offset);
179}
180
181/**
182 * dmtimer_write - write timer registers in posted and non-posted mode
183 * @timer: timer pointer over which write operation is to perform
184 * @reg: lowest byte holds the register offset
185 * @value: data to write into the register
186 *
187 * The posted mode bit is encoded in reg. Note that in posted mode, the write
188 * pending bit must be checked. Otherwise a write on a register which has a
189 * pending write will be lost.
190 */
191static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
192{
193 u16 wp, offset;
194
195 wp = reg >> WPSHIFT;
196 offset = reg & 0xff;
197
198 /* Wait for a possible write pending bit in posted mode */
199 if (wp && timer->posted)
200 while (readl_relaxed(timer->pend) & wp)
201 cpu_relax();
202
203 writel_relaxed(val, timer->func_base + offset);
204}
205
206static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
207{
208 u32 tidr;
209
210 /* Assume v1 ip if bits [31:16] are zero */
211 tidr = readl_relaxed(timer->io_base);
212 if (!(tidr >> 16)) {
213 timer->revision = 1;
214 timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
215 timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
216 timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
217 timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
218 timer->func_base = timer->io_base;
219 } else {
220 timer->revision = 2;
221 timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
222 timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
223 timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
224 timer->pend = timer->io_base +
225 _OMAP_TIMER_WRITE_PEND_OFFSET +
226 OMAP_TIMER_V2_FUNC_OFFSET;
227 timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
228 }
229}
230
231/*
232 * __omap_dm_timer_enable_posted - enables write posted mode
233 * @timer: pointer to timer instance handle
234 *
235 * Enables the write posted mode for the timer. When posted mode is enabled
236 * writes to certain timer registers are immediately acknowledged by the
237 * internal bus and hence prevents stalling the CPU waiting for the write to
238 * complete. Enabling this feature can improve performance for writing to the
239 * timer registers.
240 */
241static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
242{
243 if (timer->posted)
244 return;
245
246 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
247 timer->posted = OMAP_TIMER_NONPOSTED;
248 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
249 return;
250 }
251
252 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
253 timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
254 timer->posted = OMAP_TIMER_POSTED;
255}
256
257static inline void __omap_dm_timer_stop(struct dmtimer *timer,
258 unsigned long rate)
259{
260 u32 l;
261
262 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
263 if (l & OMAP_TIMER_CTRL_ST) {
264 l &= ~0x1;
265 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
266#ifdef CONFIG_ARCH_OMAP2PLUS
267 /* Readback to make sure write has completed */
268 dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
269 /*
270 * Wait for functional clock period x 3.5 to make sure that
271 * timer is stopped
272 */
273 udelay(3500000 / rate + 1);
274#endif
275 }
276
277 /* Ack possibly pending interrupt */
278 dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
279}
280
281static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
282 unsigned int value)
283{
284 dmtimer_write(timer, timer->irq_ena, value);
285 dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
286}
287
288static inline unsigned int
289__omap_dm_timer_read_counter(struct dmtimer *timer)
290{
291 return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
292}
293
294static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
295 unsigned int value)
296{
297 dmtimer_write(timer, timer->irq_stat, value);
298}
299
300static void omap_timer_restore_context(struct dmtimer *timer)
301{
302 dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
303
304 dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
305 dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
306 dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
307 dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
308 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
309 dmtimer_write(timer, timer->irq_ena, timer->context.tier);
310 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
311}
312
313static void omap_timer_save_context(struct dmtimer *timer)
314{
315 timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
316
317 timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
318 timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
319 timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
320 timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
321 timer->context.tier = dmtimer_read(timer, timer->irq_ena);
322 timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
323}
324
325static int omap_timer_context_notifier(struct notifier_block *nb,
326 unsigned long cmd, void *v)
327{
328 struct dmtimer *timer;
329
330 timer = container_of(nb, struct dmtimer, nb);
331
332 switch (cmd) {
333 case CPU_CLUSTER_PM_ENTER:
334 if ((timer->capability & OMAP_TIMER_ALWON) ||
335 !atomic_read(&timer->enabled))
336 break;
337 omap_timer_save_context(timer);
338 break;
339 case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
340 break;
341 case CPU_CLUSTER_PM_EXIT:
342 if ((timer->capability & OMAP_TIMER_ALWON) ||
343 !atomic_read(&timer->enabled))
344 break;
345 omap_timer_restore_context(timer);
346 break;
347 }
348
349 return NOTIFY_OK;
350}
351
352static int omap_dm_timer_reset(struct dmtimer *timer)
353{
354 u32 l, timeout = 100000;
355
356 if (timer->revision != 1)
357 return -EINVAL;
358
359 dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
360
361 do {
362 l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
363 } while (!l && timeout--);
364
365 if (!timeout) {
366 dev_err(&timer->pdev->dev, "Timer failed to reset\n");
367 return -ETIMEDOUT;
368 }
369
370 /* Configure timer for smart-idle mode */
371 l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
372 l |= 0x2 << 0x3;
373 dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
374
375 timer->posted = 0;
376
377 return 0;
378}
379
380/*
381 * Functions exposed to PWM and remoteproc drivers via platform_data.
382 * Do not use these in the driver, these will get deprecated and will
383 * will be replaced by Linux generic framework functions such as
384 * chained interrupts and clock framework.
385 */
386static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
387{
388 if (!cookie)
389 return NULL;
390
391 return container_of(cookie, struct dmtimer, cookie);
392}
393
394static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
395{
396 int ret;
397 const char *parent_name;
398 struct clk *parent;
399 struct dmtimer_platform_data *pdata;
400 struct dmtimer *timer;
401
402 timer = to_dmtimer(cookie);
403 if (unlikely(!timer) || IS_ERR(timer->fclk))
404 return -EINVAL;
405
406 switch (source) {
407 case OMAP_TIMER_SRC_SYS_CLK:
408 parent_name = "timer_sys_ck";
409 break;
410 case OMAP_TIMER_SRC_32_KHZ:
411 parent_name = "timer_32k_ck";
412 break;
413 case OMAP_TIMER_SRC_EXT_CLK:
414 parent_name = "timer_ext_ck";
415 break;
416 default:
417 return -EINVAL;
418 }
419
420 pdata = timer->pdev->dev.platform_data;
421
422 /*
423 * FIXME: Used for OMAP1 devices only because they do not currently
424 * use the clock framework to set the parent clock. To be removed
425 * once OMAP1 migrated to using clock framework for dmtimers
426 */
427 if (timer->omap1 && pdata && pdata->set_timer_src)
428 return pdata->set_timer_src(timer->pdev, source);
429
430#if defined(CONFIG_COMMON_CLK)
431 /* Check if the clock has configurable parents */
432 if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
433 return 0;
434#endif
435
436 parent = clk_get(&timer->pdev->dev, parent_name);
437 if (IS_ERR(parent)) {
438 pr_err("%s: %s not found\n", __func__, parent_name);
439 return -EINVAL;
440 }
441
442 ret = clk_set_parent(timer->fclk, parent);
443 if (ret < 0)
444 pr_err("%s: failed to set %s as parent\n", __func__,
445 parent_name);
446
447 clk_put(parent);
448
449 return ret;
450}
451
452static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
453{
454 struct dmtimer *timer = to_dmtimer(cookie);
455 struct device *dev = &timer->pdev->dev;
456 int rc;
457
458 rc = pm_runtime_resume_and_get(dev);
459 if (rc)
460 dev_err(dev, "could not enable timer\n");
461}
462
463static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
464{
465 struct dmtimer *timer = to_dmtimer(cookie);
466 struct device *dev = &timer->pdev->dev;
467
468 pm_runtime_put_sync(dev);
469}
470
471static int omap_dm_timer_prepare(struct dmtimer *timer)
472{
473 struct device *dev = &timer->pdev->dev;
474 int rc;
475
476 rc = pm_runtime_resume_and_get(dev);
477 if (rc)
478 return rc;
479
480 if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
481 rc = omap_dm_timer_reset(timer);
482 if (rc) {
483 pm_runtime_put_sync(dev);
484 return rc;
485 }
486 }
487
488 __omap_dm_timer_enable_posted(timer);
489 pm_runtime_put_sync(dev);
490
491 return 0;
492}
493
494static inline u32 omap_dm_timer_reserved_systimer(int id)
495{
496 return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
497}
498
499static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
500{
501 struct dmtimer *timer = NULL, *t;
502 struct device_node *np = NULL;
503 unsigned long flags;
504 u32 cap = 0;
505 int id = 0;
506
507 switch (req_type) {
508 case REQUEST_BY_ID:
509 id = *(int *)data;
510 break;
511 case REQUEST_BY_CAP:
512 cap = *(u32 *)data;
513 break;
514 case REQUEST_BY_NODE:
515 np = (struct device_node *)data;
516 break;
517 default:
518 /* REQUEST_ANY */
519 break;
520 }
521
522 spin_lock_irqsave(&dm_timer_lock, flags);
523 list_for_each_entry(t, &omap_timer_list, node) {
524 if (t->reserved)
525 continue;
526
527 switch (req_type) {
528 case REQUEST_BY_ID:
529 if (id == t->pdev->id) {
530 timer = t;
531 timer->reserved = 1;
532 goto found;
533 }
534 break;
535 case REQUEST_BY_CAP:
536 if (cap == (t->capability & cap)) {
537 /*
538 * If timer is not NULL, we have already found
539 * one timer. But it was not an exact match
540 * because it had more capabilities than what
541 * was required. Therefore, unreserve the last
542 * timer found and see if this one is a better
543 * match.
544 */
545 if (timer)
546 timer->reserved = 0;
547 timer = t;
548 timer->reserved = 1;
549
550 /* Exit loop early if we find an exact match */
551 if (t->capability == cap)
552 goto found;
553 }
554 break;
555 case REQUEST_BY_NODE:
556 if (np == t->pdev->dev.of_node) {
557 timer = t;
558 timer->reserved = 1;
559 goto found;
560 }
561 break;
562 default:
563 /* REQUEST_ANY */
564 timer = t;
565 timer->reserved = 1;
566 goto found;
567 }
568 }
569found:
570 spin_unlock_irqrestore(&dm_timer_lock, flags);
571
572 if (timer && omap_dm_timer_prepare(timer)) {
573 timer->reserved = 0;
574 timer = NULL;
575 }
576
577 if (!timer)
578 pr_debug("%s: timer request failed!\n", __func__);
579
580 return timer;
581}
582
583static struct omap_dm_timer *omap_dm_timer_request(void)
584{
585 struct dmtimer *timer;
586
587 timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
588 if (!timer)
589 return NULL;
590
591 return &timer->cookie;
592}
593
594static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
595{
596 struct dmtimer *timer;
597
598 /* Requesting timer by ID is not supported when device tree is used */
599 if (of_have_populated_dt()) {
600 pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
601 __func__);
602 return NULL;
603 }
604
605 timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
606 if (!timer)
607 return NULL;
608
609 return &timer->cookie;
610}
611
612/**
613 * omap_dm_timer_request_by_node - Request a timer by device-tree node
614 * @np: Pointer to device-tree timer node
615 *
616 * Request a timer based upon a device node pointer. Returns pointer to
617 * timer handle on success and a NULL pointer on failure.
618 */
619static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
620{
621 struct dmtimer *timer;
622
623 if (!np)
624 return NULL;
625
626 timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
627 if (!timer)
628 return NULL;
629
630 return &timer->cookie;
631}
632
633static int omap_dm_timer_free(struct omap_dm_timer *cookie)
634{
635 struct dmtimer *timer;
636 struct device *dev;
637 int rc;
638
639 timer = to_dmtimer(cookie);
640 if (unlikely(!timer))
641 return -EINVAL;
642
643 WARN_ON(!timer->reserved);
644 timer->reserved = 0;
645
646 dev = &timer->pdev->dev;
647 rc = pm_runtime_resume_and_get(dev);
648 if (rc)
649 return rc;
650
651 /* Clear timer configuration */
652 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);
653
654 pm_runtime_put_sync(dev);
655
656 return 0;
657}
658
659static int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
660{
661 struct dmtimer *timer = to_dmtimer(cookie);
662 if (timer)
663 return timer->irq;
664 return -EINVAL;
665}
666
667#if defined(CONFIG_ARCH_OMAP1)
668#include <linux/soc/ti/omap1-io.h>
669
670static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
671{
672 return NULL;
673}
674
675/**
676 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
677 * @inputmask: current value of idlect mask
678 */
679__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
680{
681 int i = 0;
682 struct dmtimer *timer = NULL;
683 unsigned long flags;
684
685 /* If ARMXOR cannot be idled this function call is unnecessary */
686 if (!(inputmask & (1 << 1)))
687 return inputmask;
688
689 /* If any active timer is using ARMXOR return modified mask */
690 spin_lock_irqsave(&dm_timer_lock, flags);
691 list_for_each_entry(timer, &omap_timer_list, node) {
692 u32 l;
693
694 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
695 if (l & OMAP_TIMER_CTRL_ST) {
696 if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
697 inputmask &= ~(1 << 1);
698 else
699 inputmask &= ~(1 << 2);
700 }
701 i++;
702 }
703 spin_unlock_irqrestore(&dm_timer_lock, flags);
704
705 return inputmask;
706}
707
708#else
709
710static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
711{
712 struct dmtimer *timer = to_dmtimer(cookie);
713
714 if (timer && !IS_ERR(timer->fclk))
715 return timer->fclk;
716 return NULL;
717}
718
719__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
720{
721 BUG();
722
723 return 0;
724}
725
726#endif
727
728static int omap_dm_timer_start(struct omap_dm_timer *cookie)
729{
730 struct dmtimer *timer;
731 struct device *dev;
732 int rc;
733 u32 l;
734
735 timer = to_dmtimer(cookie);
736 if (unlikely(!timer))
737 return -EINVAL;
738
739 dev = &timer->pdev->dev;
740
741 rc = pm_runtime_resume_and_get(dev);
742 if (rc)
743 return rc;
744
745 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
746 if (!(l & OMAP_TIMER_CTRL_ST)) {
747 l |= OMAP_TIMER_CTRL_ST;
748 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
749 }
750
751 return 0;
752}
753
754static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
755{
756 struct dmtimer *timer;
757 struct device *dev;
758 unsigned long rate = 0;
759
760 timer = to_dmtimer(cookie);
761 if (unlikely(!timer))
762 return -EINVAL;
763
764 dev = &timer->pdev->dev;
765
766 if (!timer->omap1)
767 rate = clk_get_rate(timer->fclk);
768
769 __omap_dm_timer_stop(timer, rate);
770
771 pm_runtime_put_sync(dev);
772
773 return 0;
774}
775
776static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
777 unsigned int load)
778{
779 struct dmtimer *timer;
780 struct device *dev;
781 int rc;
782
783 timer = to_dmtimer(cookie);
784 if (unlikely(!timer))
785 return -EINVAL;
786
787 dev = &timer->pdev->dev;
788 rc = pm_runtime_resume_and_get(dev);
789 if (rc)
790 return rc;
791
792 dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
793
794 pm_runtime_put_sync(dev);
795
796 return 0;
797}
798
799static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
800 unsigned int match)
801{
802 struct dmtimer *timer;
803 struct device *dev;
804 int rc;
805 u32 l;
806
807 timer = to_dmtimer(cookie);
808 if (unlikely(!timer))
809 return -EINVAL;
810
811 dev = &timer->pdev->dev;
812 rc = pm_runtime_resume_and_get(dev);
813 if (rc)
814 return rc;
815
816 l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
817 if (enable)
818 l |= OMAP_TIMER_CTRL_CE;
819 else
820 l &= ~OMAP_TIMER_CTRL_CE;
821 dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
822 dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
823
824 pm_runtime_put_sync(dev);
825
826 return 0;
827}
828
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
				 int toggle, int trigger, int autoreload)
{
	struct dmtimer *timer;
	struct device *dev;
	int rc;
	u32 l;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer))
		return -EINVAL;

	dev = &timer->pdev->dev;
	rc = pm_runtime_resume_and_get(dev);
	if (rc)
		return rc;

	l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
	       OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
	if (def_on)
		l |= OMAP_TIMER_CTRL_SCPWM;
	if (toggle)
		l |= OMAP_TIMER_CTRL_PT;
	l |= trigger << 10;
	if (autoreload)
		l |= OMAP_TIMER_CTRL_AR;
	dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);

	pm_runtime_put_sync(dev);

	return 0;
}

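/**
 * omap_dm_timer_get_pwm_status - read back the current TCLR value
 * @cookie: pointer to timer cookie
 *
 * Returns the raw control register so callers can inspect the PWM related
 * bits (SCPWM, PT, TRG, AR) that were last programmed.
 */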
static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
	struct dmtimer *timer;
	struct device *dev;
	int rc;
	u32 l;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer))
		return -EINVAL;

	dev = &timer->pdev->dev;
	rc = pm_runtime_resume_and_get(dev);
	if (rc)
		return rc;

	l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);

	pm_runtime_put_sync(dev);

	return l;
}

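/**
 * omap_dm_timer_set_prescaler - configure the functional clock prescaler
 * @cookie: pointer to timer cookie
 * @prescaler: -1 to disable the prescaler, or 0..7 to divide the timer
 *	       functional clock by 2^(@prescaler + 1), i.e. by 2 to 256
 */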
static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
				       int prescaler)
{
	struct dmtimer *timer;
	struct device *dev;
	int rc;
	u32 l;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
		return -EINVAL;

	dev = &timer->pdev->dev;
	rc = pm_runtime_resume_and_get(dev);
	if (rc)
		return rc;

	l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
	if (prescaler >= 0) {
		l |= OMAP_TIMER_CTRL_PRE;
		l |= prescaler << 2;
	}
	dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);

	pm_runtime_put_sync(dev);

	return 0;
}

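/**
 * omap_dm_timer_set_int_enable - enable timer interrupts
 * @cookie: pointer to timer cookie
 * @value: bit mask of interrupts (overflow/match/capture) to be enabled
 *
 * Also enables the matching events in the wakeup enable register.
 */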
static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
					unsigned int value)
{
	struct dmtimer *timer;
	struct device *dev;
	int rc;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer))
		return -EINVAL;

	dev = &timer->pdev->dev;
	rc = pm_runtime_resume_and_get(dev);
	if (rc)
		return rc;

	__omap_dm_timer_int_enable(timer, value);

	pm_runtime_put_sync(dev);

	return 0;
}

/**
 * omap_dm_timer_set_int_disable - disable timer interrupts
 * @cookie: pointer to timer cookie
 * @mask: bit mask of interrupts to be disabled
 *
 * Disables the specified timer interrupts for a timer.
 */
static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
	struct dmtimer *timer;
	struct device *dev;
	u32 l = mask;
	int rc;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer))
		return -EINVAL;

	dev = &timer->pdev->dev;
	rc = pm_runtime_resume_and_get(dev);
	if (rc)
		return rc;

	if (timer->revision == 1)
		l = dmtimer_read(timer, timer->irq_ena) & ~mask;

	dmtimer_write(timer, timer->irq_dis, l);
	l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
	dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);

	pm_runtime_put_sync(dev);

	return 0;
}

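/*
 * Status and counter accessors. Unlike the functions above, these do not
 * take a runtime PM reference of their own; they require the timer to
 * already be enabled and simply fail (or return 0) otherwise.
 */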
static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
	struct dmtimer *timer;
	unsigned int l;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer || !atomic_read(&timer->enabled))) {
		pr_err("%s: timer not available or not enabled.\n", __func__);
		return 0;
	}

	l = dmtimer_read(timer, timer->irq_stat);

	return l;
}

static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
	struct dmtimer *timer;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer || !atomic_read(&timer->enabled)))
		return -EINVAL;

	__omap_dm_timer_write_status(timer, value);

	return 0;
}

static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
	struct dmtimer *timer;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer || !atomic_read(&timer->enabled))) {
		pr_err("%s: timer not available or not enabled.\n", __func__);
		return 0;
	}

	return __omap_dm_timer_read_counter(timer);
}

static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
	struct dmtimer *timer;

	timer = to_dmtimer(cookie);
	if (unlikely(!timer || !atomic_read(&timer->enabled))) {
		pr_err("%s: timer not available or not enabled.\n", __func__);
		return -EINVAL;
	}

	dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);

	/* Save the context */
	timer->context.tcrr = value;
	return 0;
}

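/*
 * Runtime PM callbacks: timers that are not in an always-on power domain
 * lose their register contents across suspend, so the context is saved on
 * runtime suspend and restored on resume. The enabled flag gates the
 * direct status/counter accessors above.
 */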
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
	struct dmtimer *timer = dev_get_drvdata(dev);

	atomic_set(&timer->enabled, 0);

	if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base)
		return 0;

	omap_timer_save_context(timer);

	return 0;
}

static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
	struct dmtimer *timer = dev_get_drvdata(dev);

	if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
		omap_timer_restore_context(timer);

	atomic_set(&timer->enabled, 1);

	return 0;
}

static const struct dev_pm_ops omap_dm_timer_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend,
			   omap_dm_timer_runtime_resume, NULL)
};

static const struct of_device_id omap_timer_match[];

/**
 * omap_dm_timer_probe - probe function called for every registered device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework at the end of device registration for all
 * timer devices.
 */
static int omap_dm_timer_probe(struct platform_device *pdev)
{
	unsigned long flags;
	struct dmtimer *timer;
	struct device *dev = &pdev->dev;
	const struct dmtimer_platform_data *pdata;
	int ret;

	pdata = of_device_get_match_data(dev);
	if (!pdata)
		pdata = dev_get_platdata(dev);
	else
		dev->platform_data = (void *)pdata;

	if (!pdata) {
		dev_err(dev, "%s: no platform data.\n", __func__);
		return -ENODEV;
	}

	timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
	if (!timer)
		return -ENOMEM;

	timer->irq = platform_get_irq(pdev, 0);
	if (timer->irq < 0)
		return timer->irq;

	timer->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(timer->io_base))
		return PTR_ERR(timer->io_base);

	platform_set_drvdata(pdev, timer);

	if (dev->of_node) {
		if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
			timer->capability |= OMAP_TIMER_ALWON;
		if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
			timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
		if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
			timer->capability |= OMAP_TIMER_HAS_PWM;
		if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
			timer->capability |= OMAP_TIMER_SECURE;
	} else {
		timer->id = pdev->id;
		timer->capability = pdata->timer_capability;
		timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
	}

	timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;

	/* OMAP1 devices do not yet use the clock framework for dmtimers */
	if (!timer->omap1) {
		timer->fclk = devm_clk_get(dev, "fck");
		if (IS_ERR(timer->fclk))
			return PTR_ERR(timer->fclk);
	} else {
		timer->fclk = ERR_PTR(-ENODEV);
	}

	if (!(timer->capability & OMAP_TIMER_ALWON)) {
		timer->nb.notifier_call = omap_timer_context_notifier;
		cpu_pm_register_notifier(&timer->nb);
	}

	timer->errata = pdata->timer_errata;

	timer->pdev = pdev;

	pm_runtime_enable(dev);

	if (!timer->reserved) {
		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			dev_err(dev, "%s: pm_runtime_resume_and_get failed!\n",
				__func__);
			goto err_disable;
		}
		__omap_dm_timer_init_regs(timer);

		/* Clear timer configuration */
		dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);

		pm_runtime_put(dev);
	}

	/* add the timer element to the list */
	spin_lock_irqsave(&dm_timer_lock, flags);
	list_add_tail(&timer->node, &omap_timer_list);
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	dev_dbg(dev, "Device Probed.\n");

	return 0;

err_disable:
	pm_runtime_disable(dev);
	return ret;
}

/**
 * omap_dm_timer_remove - cleanup a registered timer device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework whenever a timer device is unregistered.
 * In addition to freeing platform resources it also deletes the timer
 * entry from the local list.
 */
static int omap_dm_timer_remove(struct platform_device *pdev)
{
	struct dmtimer *timer;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(timer, &omap_timer_list, node)
		if (!strcmp(dev_name(&timer->pdev->dev),
			    dev_name(&pdev->dev))) {
			if (!(timer->capability & OMAP_TIMER_ALWON))
				cpu_pm_unregister_notifier(&timer->nb);
			list_del(&timer->node);
			ret = 0;
			break;
		}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	pm_runtime_disable(&pdev->dev);

	return ret;
}

static const struct omap_dm_timer_ops dmtimer_ops = {
	.request_by_node = omap_dm_timer_request_by_node,
	.request_specific = omap_dm_timer_request_specific,
	.request = omap_dm_timer_request,
	.set_source = omap_dm_timer_set_source,
	.get_irq = omap_dm_timer_get_irq,
	.set_int_enable = omap_dm_timer_set_int_enable,
	.set_int_disable = omap_dm_timer_set_int_disable,
	.free = omap_dm_timer_free,
	.enable = omap_dm_timer_enable,
	.disable = omap_dm_timer_disable,
	.get_fclk = omap_dm_timer_get_fclk,
	.start = omap_dm_timer_start,
	.stop = omap_dm_timer_stop,
	.set_load = omap_dm_timer_set_load,
	.set_match = omap_dm_timer_set_match,
	.set_pwm = omap_dm_timer_set_pwm,
	.get_pwm_status = omap_dm_timer_get_pwm_status,
	.set_prescaler = omap_dm_timer_set_prescaler,
	.read_counter = omap_dm_timer_read_counter,
	.write_counter = omap_dm_timer_write_counter,
	.read_status = omap_dm_timer_read_status,
	.write_status = omap_dm_timer_write_status,
};
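
/*
 * The ops table above is not exported directly; client drivers reach it
 * through the dmtimer_platform_data attached to the timer's platform
 * device. As a rough, illustrative sketch only (not part of this driver),
 * a consumer holding a timer phandle might do something like:
 *
 *	const struct dmtimer_platform_data *tpdata;
 *	const struct omap_dm_timer_ops *ops;
 *	struct omap_dm_timer *dmt;
 *
 *	tpdata = dev_get_platdata(&timer_pdev->dev);
 *	ops = tpdata->timer_ops;
 *	dmt = ops->request_by_node(timer_np);
 *	if (!dmt)
 *		return -EPROBE_DEFER;
 *
 *	ops->set_pwm(dmt, 0, 1, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE, 1);
 *	ops->set_load(dmt, load_value);
 *	ops->set_match(dmt, 1, match_value);
 *	ops->start(dmt);
 *
 * Here timer_pdev, timer_np, load_value and match_value are placeholders
 * the consumer derives from its own bindings; see existing users such as
 * drivers/pwm/pwm-omap-dmtimer.c for the real pattern.
 */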

static const struct dmtimer_platform_data omap3plus_pdata = {
	.timer_errata = OMAP_TIMER_ERRATA_I103_I767,
	.timer_ops = &dmtimer_ops,
};

static const struct dmtimer_platform_data am6_pdata = {
	.timer_ops = &dmtimer_ops,
};

static const struct of_device_id omap_timer_match[] = {
	{
		.compatible = "ti,omap2420-timer",
	},
	{
		.compatible = "ti,omap3430-timer",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,omap4430-timer",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,omap5430-timer",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,am335x-timer",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,am335x-timer-1ms",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,dm816-timer",
		.data = &omap3plus_pdata,
	},
	{
		.compatible = "ti,am654-timer",
		.data = &am6_pdata,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);

static struct platform_driver omap_dm_timer_driver = {
	.probe = omap_dm_timer_probe,
	.remove = omap_dm_timer_remove,
	.driver = {
		.name = "omap_timer",
		.of_match_table = omap_timer_match,
		.pm = &omap_dm_timer_pm_ops,
	},
};

module_platform_driver(omap_dm_timer_driver);

MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");