/*
 * DaVinci timer subsystem
 *
 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>

#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <mach/cputype.h>
#include <mach/hardware.h>
#include <mach/time.h>

#include "clock.h"

static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;

/*
 * This driver configures the 2 64-bit count-up timers as 4 independent
 * 32-bit count-up timers used as follows:
 */

enum {
	TID_CLOCKEVENT,
	TID_CLOCKSOURCE,
};
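
/*
 * Which 32-bit timer half backs each of these roles is chosen at runtime
 * from davinci_soc_info in davinci_timer_init() below.
 */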

/* Timer register offsets */
#define PID12				0x0
#define TIM12				0x10
#define TIM34				0x14
#define PRD12				0x18
#define PRD34				0x1c
#define TCR				0x20
#define TGCR				0x24
#define WDTCR				0x28

/* Offsets of the 8 compare registers */
#define CMP12_0				0x60
#define CMP12_1				0x64
#define CMP12_2				0x68
#define CMP12_3				0x6c
#define CMP12_4				0x70
#define CMP12_5				0x74
#define CMP12_6				0x78
#define CMP12_7				0x7c

/* Timer register bitfields */
#define TCR_ENAMODE_DISABLE		0x0
#define TCR_ENAMODE_ONESHOT		0x1
#define TCR_ENAMODE_PERIODIC		0x2
#define TCR_ENAMODE_MASK		0x3

#define TGCR_TIMMODE_SHIFT		2
#define TGCR_TIMMODE_64BIT_GP		0x0
#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
#define TGCR_TIMMODE_64BIT_WDOG		0x2
#define TGCR_TIMMODE_32BIT_CHAINED	0x3

#define TGCR_TIM12RS_SHIFT		0
#define TGCR_TIM34RS_SHIFT		1
#define TGCR_RESET			0x0
#define TGCR_UNRESET			0x1
#define TGCR_RESET_MASK			0x3

struct timer_s {
	char *name;
	unsigned int id;
	unsigned long period;
	unsigned long opts;
	unsigned long flags;
	void __iomem *base;
	unsigned long tim_off;
	unsigned long prd_off;
	unsigned long enamode_shift;
	struct irqaction irqaction;
};
static struct timer_s timers[];

/* values for 'opts' field of struct timer_s */
#define TIMER_OPTS_DISABLED		0x01
#define TIMER_OPTS_ONESHOT		0x02
#define TIMER_OPTS_PERIODIC		0x04
#define TIMER_OPTS_STATE_MASK		0x07

#define TIMER_OPTS_USE_COMPARE		0x80000000
#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
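
/*
 * TIMER_OPTS_USE_COMPARE marks the case where the clockevent shares its
 * timer with the clocksource; events are then generated from one of the
 * CMP12_x compare registers instead of reprogramming the period register.
 */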

static char *id_to_name[] = {
	[T0_BOT]	= "timer0_0",
	[T0_TOP]	= "timer0_1",
	[T1_BOT]	= "timer1_0",
	[T1_TOP]	= "timer1_1",
};

static int timer32_config(struct timer_s *t)
{
	u32 tcr;
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	if (USING_COMPARE(t)) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);

		/*
		 * Next interrupt should be the current time reg value plus
		 * the new period (using 32-bit unsigned addition/wrapping
		 * to 0 on overflow). This assumes that the clocksource
		 * is set up to count to 2^32-1 before wrapping around to 0.
		 */
		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
			t->base + dtip[event_timer].cmp_off);
	} else {
		tcr = __raw_readl(t->base + TCR);

		/* disable timer */
		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
		__raw_writel(tcr, t->base + TCR);

		/* reset counter to zero, set new period */
		__raw_writel(0, t->base + t->tim_off);
		__raw_writel(t->period, t->base + t->prd_off);

		/* Set enable mode */
		if (t->opts & TIMER_OPTS_ONESHOT)
			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
		else if (t->opts & TIMER_OPTS_PERIODIC)
			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;

		__raw_writel(tcr, t->base + TCR);
	}
	return 0;
}

static inline u32 timer32_read(struct timer_s *t)
{
	return __raw_readl(t->base + t->tim_off);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_davinci;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* called when 32-bit counter wraps */
static irqreturn_t freerun_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

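/*
 * Static configuration of the two logical 32-bit timers: the clockevent
 * timer starts out disabled and is programmed by the clockevent callbacks
 * below, while the clocksource timer free-runs with a period of ~0 so it
 * counts over the full 32-bit range before wrapping.
 */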
static struct timer_s timers[] = {
	[TID_CLOCKEVENT] = {
		.name      = "clockevent",
		.opts      = TIMER_OPTS_DISABLED,
		.irqaction = {
			.flags   = IRQF_TIMER,
			.handler = timer_interrupt,
		}
	},
	[TID_CLOCKSOURCE] = {
		.name      = "free-run counter",
		.period    = ~0,
		.opts      = TIMER_OPTS_PERIODIC,
		.irqaction = {
			.flags   = IRQF_TIMER,
			.handler = freerun_interrupt,
		}
	},
};

static void __init timer_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
	void __iomem *base[2];
	int i;

	/* Global init of each 64-bit timer as a whole */
	for (i = 0; i < 2; i++) {
		u32 tgcr;

		base[i] = ioremap(dtip[i].base, SZ_4K);
		if (WARN_ON(!base[i]))
			continue;

		/* Disabled, Internal clock source */
		__raw_writel(0, base[i] + TCR);

		/* reset both timers, no pre-scaler for timer34 */
		tgcr = 0;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Set both timers to unchained 32-bit */
		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Unreset timers */
		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
		__raw_writel(tgcr, base[i] + TGCR);

		/* Init both counters to zero */
		__raw_writel(0, base[i] + TIM12);
		__raw_writel(0, base[i] + TIM34);
	}

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];
		int timer = ID_TO_TIMER(t->id);
		u32 irq;

		t->base = base[timer];
		if (!t->base)
			continue;

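		/*
		 * TCR holds a separate 2-bit enable-mode field for each half
		 * of the 64-bit timer: bits 7:6 control TIM12 and bits 23:22
		 * control TIM34, hence the shifts of 6 and 22 below.
		 */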
		if (IS_TIMER_BOT(t->id)) {
			t->enamode_shift = 6;
			t->tim_off = TIM12;
			t->prd_off = PRD12;
			irq = dtip[timer].bottom_irq;
		} else {
			t->enamode_shift = 22;
			t->tim_off = TIM34;
			t->prd_off = PRD34;
			irq = dtip[timer].top_irq;
		}

		/* Register interrupt */
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		if (t->irqaction.handler != NULL) {
			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
			setup_irq(irq, &t->irqaction);
		}
	}
}

/*
 * clocksource
 */
static u64 read_cycles(struct clocksource *cs)
{
	struct timer_s *t = &timers[TID_CLOCKSOURCE];

	return (cycles_t)timer32_read(t);
}

static struct clocksource clocksource_davinci = {
	.rating		= 300,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Overwrite weak default sched_clock with something more precise
 */
static u64 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * clockevent
 */
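
/*
 * Program the clockevent timer to expire 'cycles' ticks from now.  In
 * compare mode this adds 'cycles' to the current counter value; otherwise
 * the counter is reset and 'cycles' becomes the new period.
 */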
static int davinci_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = cycles;
	timer32_config(t);
	return 0;
}

static int davinci_shutdown(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_DISABLED;
	return 0;
}

static int davinci_set_oneshot(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_ONESHOT;
	return 0;
}

static int davinci_set_periodic(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

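	/* one clockevent period per jiffy */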
	t->period = davinci_clock_tick_rate / (HZ);
	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_PERIODIC;
	timer32_config(t);
	return 0;
}

static struct clock_event_device clockevent_davinci = {
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= davinci_set_next_event,
	.set_state_shutdown	= davinci_shutdown,
	.set_state_periodic	= davinci_set_periodic,
	.set_state_oneshot	= davinci_set_oneshot,
};

void __init davinci_timer_init(void)
{
	struct clk *timer_clk;
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned int clockevent_id;
	unsigned int clocksource_id;
	int i;

	clockevent_id = soc_info->timer_info->clockevent_id;
	clocksource_id = soc_info->timer_info->clocksource_id;

	timers[TID_CLOCKEVENT].id = clockevent_id;
	timers[TID_CLOCKSOURCE].id = clocksource_id;

	/*
	 * If using the same timer for both clock events & clocksource,
	 * a compare register must be used to generate an event interrupt.
	 * This is equivalent to a oneshot timer only (not periodic).
	 */
	if (clockevent_id == clocksource_id) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(clockevent_id);

		/* Only bottom timers can use compare regs */
		if (IS_TIMER_TOP(clockevent_id))
			pr_warn("%s: Invalid use of system timers. Results unpredictable.\n",
				__func__);
		else if ((dtip[event_timer].cmp_off == 0)
				|| (dtip[event_timer].cmp_irq == 0))
			pr_warn("%s: Invalid timer instance setup. Results unpredictable.\n",
				__func__);
		else {
			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
		}
	}

	timer_clk = clk_get(NULL, "timer0");
	BUG_ON(IS_ERR(timer_clk));
	clk_prepare_enable(timer_clk);

	/* init timer hw */
	timer_init();

	davinci_clock_tick_rate = clk_get_rate(timer_clk);

	/* setup clocksource */
	clocksource_davinci.name = id_to_name[clocksource_id];
	if (clocksource_register_hz(&clocksource_davinci,
				    davinci_clock_tick_rate))
		pr_err("%s: can't register clocksource!\n",
		       clocksource_davinci.name);

	sched_clock_register(davinci_read_sched_clock, 32,
			     davinci_clock_tick_rate);

	/* setup clockevent */
	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];

	clockevent_davinci.cpumask = cpumask_of(0);
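	/* allow deltas from 1 tick up to just below the 32-bit wrap point */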
	clockevents_config_and_register(&clockevent_davinci,
					davinci_clock_tick_rate, 1, 0xfffffffe);

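	/* push the configured mode and period into both 32-bit timers */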
	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);
}