v4.17
  1/*
  2 * DaVinci timer subsystem
  3 *
  4 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
  5 *
  6 * 2007 (c) MontaVista Software, Inc. This file is licensed under
  7 * the terms of the GNU General Public License version 2. This program
  8 * is licensed "as is" without any warranty of any kind, whether express
  9 * or implied.
 10 */
 11#include <linux/kernel.h>
 12#include <linux/init.h>
 13#include <linux/types.h>
 14#include <linux/interrupt.h>
 15#include <linux/clocksource.h>
 16#include <linux/clockchips.h>
 17#include <linux/io.h>
 18#include <linux/clk.h>
 19#include <linux/err.h>
 20#include <linux/platform_device.h>
 21#include <linux/sched_clock.h>
 22
 23#include <asm/mach/irq.h>
 24#include <asm/mach/time.h>
 25
 26#include <mach/cputype.h>
 27#include <mach/hardware.h>
 28#include <mach/time.h>
 29
 30#include "clock.h"
 31
 32static struct clock_event_device clockevent_davinci;
 33static unsigned int davinci_clock_tick_rate;
 34
 35/*
 36 * This driver configures the 2 64-bit count-up timers as 4 independent
 37 * 32-bit count-up timers used as follows:
 38 */
 39
 40enum {
 41	TID_CLOCKEVENT,
 42	TID_CLOCKSOURCE,
 43};
 44
 45/* Timer register offsets */
 46#define PID12			0x0
 47#define TIM12			0x10
 48#define TIM34			0x14
 49#define PRD12			0x18
 50#define PRD34			0x1c
 51#define TCR			0x20
 52#define TGCR			0x24
 53#define WDTCR			0x28
 54
 55/* Offsets of the 8 compare registers */
 56#define	CMP12_0			0x60
 57#define	CMP12_1			0x64
 58#define	CMP12_2			0x68
 59#define	CMP12_3			0x6c
 60#define	CMP12_4			0x70
 61#define	CMP12_5			0x74
 62#define	CMP12_6			0x78
 63#define	CMP12_7			0x7c
 64
 65/* Timer register bitfields */
 66#define TCR_ENAMODE_DISABLE          0x0
 67#define TCR_ENAMODE_ONESHOT          0x1
 68#define TCR_ENAMODE_PERIODIC         0x2
 69#define TCR_ENAMODE_MASK             0x3
 70
 71#define TGCR_TIMMODE_SHIFT           2
 72#define TGCR_TIMMODE_64BIT_GP        0x0
 73#define TGCR_TIMMODE_32BIT_UNCHAINED 0x1
 74#define TGCR_TIMMODE_64BIT_WDOG      0x2
 75#define TGCR_TIMMODE_32BIT_CHAINED   0x3
 76
 77#define TGCR_TIM12RS_SHIFT           0
 78#define TGCR_TIM34RS_SHIFT           1
 79#define TGCR_RESET                   0x0
 80#define TGCR_UNRESET                 0x1
 81#define TGCR_RESET_MASK              0x3
 82
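
The TGCR fields above are combined in timer_init() further down. As a minimal standalone sketch (not part of the driver) of the value that sequence ends up writing, assuming nothing beyond the #defines above:

#include <stdio.h>

#define TGCR_TIMMODE_SHIFT           2
#define TGCR_TIMMODE_32BIT_UNCHAINED 0x1
#define TGCR_TIM12RS_SHIFT           0
#define TGCR_TIM34RS_SHIFT           1
#define TGCR_UNRESET                 0x1

int main(void)
{
	/* unchained 32-bit mode, then release both halves from reset */
	unsigned int tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;

	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
	printf("TGCR = 0x%x\n", tgcr);	/* prints 0x7 */
	return 0;
}
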
 83struct timer_s {
 84	char *name;
 85	unsigned int id;
 86	unsigned long period;
 87	unsigned long opts;
 88	unsigned long flags;
 89	void __iomem *base;
 90	unsigned long tim_off;
 91	unsigned long prd_off;
 92	unsigned long enamode_shift;
 93	struct irqaction irqaction;
 94};
 95static struct timer_s timers[];
 96
 97/* values for 'opts' field of struct timer_s */
 98#define TIMER_OPTS_DISABLED		0x01
 99#define TIMER_OPTS_ONESHOT		0x02
100#define TIMER_OPTS_PERIODIC		0x04
101#define TIMER_OPTS_STATE_MASK		0x07
102
103#define TIMER_OPTS_USE_COMPARE		0x80000000
104#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
105
106static char *id_to_name[] = {
107	[T0_BOT]	= "timer0_0",
108	[T0_TOP]	= "timer0_1",
109	[T1_BOT]	= "timer1_0",
110	[T1_TOP]	= "timer1_1",
111};
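
/*
 * The T0_BOT/T0_TOP/T1_BOT/T1_TOP ids used in this table, and the
 * ID_TO_TIMER()/IS_TIMER_BOT() helpers used below, come from <mach/time.h>.
 * The convention this file relies on is that each 64-bit timer module
 * provides a "bottom" (TIM12) and a "top" (TIM34) 32-bit half, and the id
 * encodes which module and which half a struct timer_s is bound to.
 */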
112
113static int timer32_config(struct timer_s *t)
114{
115	u32 tcr;
116	struct davinci_soc_info *soc_info = &davinci_soc_info;
117
118	if (USING_COMPARE(t)) {
119		struct davinci_timer_instance *dtip =
120				soc_info->timer_info->timers;
121		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);
122
123		/*
124		 * Next interrupt should be the current time reg value plus
125		 * the new period (using 32-bit unsigned addition/wrapping
126		 * to 0 on overflow).  This assumes that the clocksource
127		 * is setup to count to 2^32-1 before wrapping around to 0.
128		 */
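		/*
		 * Worked illustration with assumed numbers (not from the
		 * driver): for a requested delta of 240000 counts (10 ms at
		 * 24 MHz), if the counter currently reads 0xfffff000 the
		 * compare value written below is
		 * (u32)(0xfffff000 + 240000) = 0x00039980; the unsigned sum
		 * wraps past zero and the match still fires 240000 counts
		 * later.
		 */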
129		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
130			t->base + dtip[event_timer].cmp_off);
131	} else {
132		tcr = __raw_readl(t->base + TCR);
133
134		/* disable timer */
135		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
136		__raw_writel(tcr, t->base + TCR);
137
138		/* reset counter to zero, set new period */
139		__raw_writel(0, t->base + t->tim_off);
140		__raw_writel(t->period, t->base + t->prd_off);
141
142		/* Set enable mode */
143		if (t->opts & TIMER_OPTS_ONESHOT)
144			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
145		else if (t->opts & TIMER_OPTS_PERIODIC)
146			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
147
148		__raw_writel(tcr, t->base + TCR);
149	}
150	return 0;
151}
152
153static inline u32 timer32_read(struct timer_s *t)
154{
155	return __raw_readl(t->base + t->tim_off);
156}
157
158static irqreturn_t timer_interrupt(int irq, void *dev_id)
159{
160	struct clock_event_device *evt = &clockevent_davinci;
161
162	evt->event_handler(evt);
163	return IRQ_HANDLED;
164}
165
166/* called when 32-bit counter wraps */
167static irqreturn_t freerun_interrupt(int irq, void *dev_id)
168{
169	return IRQ_HANDLED;
170}
171
172static struct timer_s timers[] = {
173	[TID_CLOCKEVENT] = {
174		.name      = "clockevent",
175		.opts      = TIMER_OPTS_DISABLED,
176		.irqaction = {
177			.flags   = IRQF_TIMER,
178			.handler = timer_interrupt,
179		}
180	},
181	[TID_CLOCKSOURCE] = {
182		.name       = "free-run counter",
183		.period     = ~0,
184		.opts       = TIMER_OPTS_PERIODIC,
185		.irqaction = {
186			.flags   = IRQF_TIMER,
187			.handler = freerun_interrupt,
188		}
189	},
190};
191
192static void __init timer_init(void)
193{
194	struct davinci_soc_info *soc_info = &davinci_soc_info;
195	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
196	void __iomem *base[2];
197	int i;
198
199	/* Global init of each 64-bit timer as a whole */
200	for (i = 0; i < 2; i++) {
201		u32 tgcr;
202
203		base[i] = ioremap(dtip[i].base, SZ_4K);
204		if (WARN_ON(!base[i]))
205			continue;
206
207		/* Disabled, Internal clock source */
208		__raw_writel(0, base[i] + TCR);
209
210		/* reset both timers, no pre-scaler for timer34 */
211		tgcr = 0;
212		__raw_writel(tgcr, base[i] + TGCR);
213
214		/* Set both timers to unchained 32-bit */
215		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
216		__raw_writel(tgcr, base[i] + TGCR);
217
218		/* Unreset timers */
219		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
220			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
221		__raw_writel(tgcr, base[i] + TGCR);
222
223		/* Init both counters to zero */
224		__raw_writel(0, base[i] + TIM12);
225		__raw_writel(0, base[i] + TIM34);
226	}
227
228	/* Init of each timer as a 32-bit timer */
229	for (i = 0; i < ARRAY_SIZE(timers); i++) {
230		struct timer_s *t = &timers[i];
231		int timer = ID_TO_TIMER(t->id);
232		u32 irq;
233
234		t->base = base[timer];
235		if (!t->base)
236			continue;
237
238		if (IS_TIMER_BOT(t->id)) {
239			t->enamode_shift = 6;
240			t->tim_off = TIM12;
241			t->prd_off = PRD12;
242			irq = dtip[timer].bottom_irq;
243		} else {
244			t->enamode_shift = 22;
245			t->tim_off = TIM34;
246			t->prd_off = PRD34;
247			irq = dtip[timer].top_irq;
248		}
249
250		/* Register interrupt */
251		t->irqaction.name = t->name;
252		t->irqaction.dev_id = (void *)t;
253
254		if (t->irqaction.handler != NULL) {
255			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
256			setup_irq(irq, &t->irqaction);
257		}
258	}
259}
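
The enamode_shift values chosen above (6 for the TIM12 half, 22 for the TIM34 half) select the two ENAMODE fields inside TCR. A minimal sketch of the resulting enable values, using only the TCR #defines from this file:

#include <stdio.h>

#define TCR_ENAMODE_PERIODIC 0x2
#define TCR_ENAMODE_MASK     0x3

int main(void)
{
	/* periodic enable bits for each 32-bit half of one timer module */
	printf("TIM12 half: 0x%08x\n", TCR_ENAMODE_PERIODIC << 6);	/* 0x00000080 */
	printf("TIM34 half: 0x%08x\n", TCR_ENAMODE_PERIODIC << 22);	/* 0x00800000 */
	printf("TIM12 mask: 0x%08x\n", TCR_ENAMODE_MASK << 6);		/* 0x000000c0 */
	return 0;
}
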
260
261/*
262 * clocksource
263 */
264static u64 read_cycles(struct clocksource *cs)
265{
266	struct timer_s *t = &timers[TID_CLOCKSOURCE];
267
268	return (cycles_t)timer32_read(t);
269}
270
271static struct clocksource clocksource_davinci = {
272	.rating		= 300,
273	.read		= read_cycles,
274	.mask		= CLOCKSOURCE_MASK(32),
275	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
276};
277
278/*
279 * Overwrite weak default sched_clock with something more precise
280 */
281static u64 notrace davinci_read_sched_clock(void)
282{
283	return timer32_read(&timers[TID_CLOCKSOURCE]);
284}
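
davinci_read_sched_clock() is registered below via sched_clock_register(..., 32, rate), so the generic sched_clock code extends the 32-bit counter across wrap-arounds. A rough sketch of how often that wrap happens, assuming a 24 MHz timer clock purely for illustration (the real rate comes from clk_get_rate()):

#include <stdio.h>

int main(void)
{
	double rate = 24000000.0;		/* assumed timer clock in Hz */
	double wrap = 4294967296.0 / rate;	/* 2^32 counts per wrap */

	printf("32-bit counter wraps every %.1f s\n", wrap);	/* ~179.0 s */
	return 0;
}
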
285
286/*
287 * clockevent
288 */
289static int davinci_set_next_event(unsigned long cycles,
290				  struct clock_event_device *evt)
291{
292	struct timer_s *t = &timers[TID_CLOCKEVENT];
293
294	t->period = cycles;
295	timer32_config(t);
296	return 0;
297}
298
299static int davinci_shutdown(struct clock_event_device *evt)
300{
301	struct timer_s *t = &timers[TID_CLOCKEVENT];
302
303	t->opts &= ~TIMER_OPTS_STATE_MASK;
304	t->opts |= TIMER_OPTS_DISABLED;
305	return 0;
306}
307
308static int davinci_set_oneshot(struct clock_event_device *evt)
309{
310	struct timer_s *t = &timers[TID_CLOCKEVENT];
311
312	t->opts &= ~TIMER_OPTS_STATE_MASK;
313	t->opts |= TIMER_OPTS_ONESHOT;
314	return 0;
315}
316
317static int davinci_set_periodic(struct clock_event_device *evt)
318{
319	struct timer_s *t = &timers[TID_CLOCKEVENT];
320
321	t->period = davinci_clock_tick_rate / (HZ);
322	t->opts &= ~TIMER_OPTS_STATE_MASK;
323	t->opts |= TIMER_OPTS_PERIODIC;
324	timer32_config(t);
325	return 0;
326}
327
328static struct clock_event_device clockevent_davinci = {
329	.features		= CLOCK_EVT_FEAT_PERIODIC |
330				  CLOCK_EVT_FEAT_ONESHOT,
331	.set_next_event		= davinci_set_next_event,
332	.set_state_shutdown	= davinci_shutdown,
333	.set_state_periodic	= davinci_set_periodic,
334	.set_state_oneshot	= davinci_set_oneshot,
335};
336
337
338void __init davinci_timer_init(void)
339{
340	struct clk *timer_clk;
341	struct davinci_soc_info *soc_info = &davinci_soc_info;
342	unsigned int clockevent_id;
343	unsigned int clocksource_id;
344	int i;
345
346	clockevent_id = soc_info->timer_info->clockevent_id;
347	clocksource_id = soc_info->timer_info->clocksource_id;
348
349	timers[TID_CLOCKEVENT].id = clockevent_id;
350	timers[TID_CLOCKSOURCE].id = clocksource_id;
351
352	/*
353	 * If using same timer for both clock events & clocksource,
354	 * a compare register must be used to generate an event interrupt.
355	 * This is equivalent to a oneshot timer only (not periodic).
356	 */
357	if (clockevent_id == clocksource_id) {
358		struct davinci_timer_instance *dtip =
359				soc_info->timer_info->timers;
360		int event_timer = ID_TO_TIMER(clockevent_id);
361
362		/* Only bottom timers can use compare regs */
363		if (IS_TIMER_TOP(clockevent_id))
364			pr_warn("%s: Invalid use of system timers.  Results unpredictable.\n",
365				__func__);
366		else if ((dtip[event_timer].cmp_off == 0)
367				|| (dtip[event_timer].cmp_irq == 0))
368			pr_warn("%s: Invalid timer instance setup.  Results unpredictable.\n",
369				__func__);
370		else {
371			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
372			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
373		}
374	}
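	/*
	 * (Why periodic mode is dropped here: the shared counter must keep
	 * free-running to 2^32-1 for the clocksource, so its period register
	 * cannot be reloaded; events can only be generated by moving the
	 * compare register ahead of the counter, which is inherently
	 * one-shot.)
	 */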
375
376	timer_clk = clk_get(NULL, "timer0");
377	BUG_ON(IS_ERR(timer_clk));
378	clk_prepare_enable(timer_clk);
379
380	/* init timer hw */
381	timer_init();
382
383	davinci_clock_tick_rate = clk_get_rate(timer_clk);
384
385	/* setup clocksource */
386	clocksource_davinci.name = id_to_name[clocksource_id];
387	if (clocksource_register_hz(&clocksource_davinci,
388				    davinci_clock_tick_rate))
389		pr_err("%s: can't register clocksource!\n",
390		       clocksource_davinci.name);
391
392	sched_clock_register(davinci_read_sched_clock, 32,
393			  davinci_clock_tick_rate);
394
395	/* setup clockevent */
396	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
397
398	clockevent_davinci.cpumask = cpumask_of(0);
399	clockevents_config_and_register(&clockevent_davinci,
400					davinci_clock_tick_rate, 1, 0xfffffffe);
401
402	for (i = 0; i < ARRAY_SIZE(timers); i++)
403		timer32_config(&timers[i]);
404}
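
davinci_timer_init() is not called from this file; boards of this era typically hook it into the ARM machine descriptor. The fragment below is only a hedged sketch of that pattern: the machine name and the map_io/init_machine callbacks are hypothetical placeholders, not code from the tree.

#include <asm/mach/arch.h>	/* MACHINE_START/MACHINE_END */
#include <mach/common.h>	/* davinci_timer_init(), davinci_irq_init() */

static void __init example_map_io(void)
{
	/* SoC-specific static I/O mappings would go here */
}

static void __init example_init_machine(void)
{
	/* board devices would be registered here */
}

MACHINE_START(DAVINCI_EXAMPLE, "Example DaVinci board")
	.map_io		= example_map_io,
	.init_irq	= davinci_irq_init,
	.init_time	= davinci_timer_init,
	.init_machine	= example_init_machine,
MACHINE_END
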
v3.1
  1/*
  2 * DaVinci timer subsystem
  3 *
  4 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
  5 *
  6 * 2007 (c) MontaVista Software, Inc. This file is licensed under
  7 * the terms of the GNU General Public License version 2. This program
  8 * is licensed "as is" without any warranty of any kind, whether express
  9 * or implied.
 10 */
 11#include <linux/kernel.h>
 12#include <linux/init.h>
 13#include <linux/types.h>
 14#include <linux/interrupt.h>
 15#include <linux/clocksource.h>
 16#include <linux/clockchips.h>
 17#include <linux/io.h>
 18#include <linux/clk.h>
 19#include <linux/err.h>
 20#include <linux/platform_device.h>
 21
 22#include <mach/hardware.h>
 23#include <asm/mach/irq.h>
 24#include <asm/mach/time.h>
 25#include <mach/cputype.h>
 26#include <mach/time.h>
 27#include "clock.h"
 28
 29static struct clock_event_device clockevent_davinci;
 30static unsigned int davinci_clock_tick_rate;
 31
 32/*
 33 * This driver configures the 2 64-bit count-up timers as 4 independent
 34 * 32-bit count-up timers used as follows:
 35 */
 36
 37enum {
 38	TID_CLOCKEVENT,
 39	TID_CLOCKSOURCE,
 40};
 41
 42/* Timer register offsets */
 43#define PID12			0x0
 44#define TIM12			0x10
 45#define TIM34			0x14
 46#define PRD12			0x18
 47#define PRD34			0x1c
 48#define TCR			0x20
 49#define TGCR			0x24
 50#define WDTCR			0x28
 51
 52/* Offsets of the 8 compare registers */
 53#define	CMP12_0			0x60
 54#define	CMP12_1			0x64
 55#define	CMP12_2			0x68
 56#define	CMP12_3			0x6c
 57#define	CMP12_4			0x70
 58#define	CMP12_5			0x74
 59#define	CMP12_6			0x78
 60#define	CMP12_7			0x7c
 61
 62/* Timer register bitfields */
 63#define TCR_ENAMODE_DISABLE          0x0
 64#define TCR_ENAMODE_ONESHOT          0x1
 65#define TCR_ENAMODE_PERIODIC         0x2
 66#define TCR_ENAMODE_MASK             0x3
 67
 68#define TGCR_TIMMODE_SHIFT           2
 69#define TGCR_TIMMODE_64BIT_GP        0x0
 70#define TGCR_TIMMODE_32BIT_UNCHAINED 0x1
 71#define TGCR_TIMMODE_64BIT_WDOG      0x2
 72#define TGCR_TIMMODE_32BIT_CHAINED   0x3
 73
 74#define TGCR_TIM12RS_SHIFT           0
 75#define TGCR_TIM34RS_SHIFT           1
 76#define TGCR_RESET                   0x0
 77#define TGCR_UNRESET                 0x1
 78#define TGCR_RESET_MASK              0x3
 79
 80#define WDTCR_WDEN_SHIFT             14
 81#define WDTCR_WDEN_DISABLE           0x0
 82#define WDTCR_WDEN_ENABLE            0x1
 83#define WDTCR_WDKEY_SHIFT            16
 84#define WDTCR_WDKEY_SEQ0             0xa5c6
 85#define WDTCR_WDKEY_SEQ1             0xda7e
 86
 87struct timer_s {
 88	char *name;
 89	unsigned int id;
 90	unsigned long period;
 91	unsigned long opts;
 92	unsigned long flags;
 93	void __iomem *base;
 94	unsigned long tim_off;
 95	unsigned long prd_off;
 96	unsigned long enamode_shift;
 97	struct irqaction irqaction;
 98};
 99static struct timer_s timers[];
100
101/* values for 'opts' field of struct timer_s */
102#define TIMER_OPTS_DISABLED		0x01
103#define TIMER_OPTS_ONESHOT		0x02
104#define TIMER_OPTS_PERIODIC		0x04
105#define TIMER_OPTS_STATE_MASK		0x07
106
107#define TIMER_OPTS_USE_COMPARE		0x80000000
108#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
109
110static char *id_to_name[] = {
111	[T0_BOT]	= "timer0_0",
112	[T0_TOP]	= "timer0_1",
113	[T1_BOT]	= "timer1_0",
114	[T1_TOP]	= "timer1_1",
115};
116
117static int timer32_config(struct timer_s *t)
118{
119	u32 tcr;
120	struct davinci_soc_info *soc_info = &davinci_soc_info;
121
122	if (USING_COMPARE(t)) {
123		struct davinci_timer_instance *dtip =
124				soc_info->timer_info->timers;
125		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);
126
127		/*
128		 * Next interrupt should be the current time reg value plus
129		 * the new period (using 32-bit unsigned addition/wrapping
130		 * to 0 on overflow).  This assumes that the clocksource
131		 * is setup to count to 2^32-1 before wrapping around to 0.
132		 */
133		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
134			t->base + dtip[event_timer].cmp_off);
135	} else {
136		tcr = __raw_readl(t->base + TCR);
137
138		/* disable timer */
139		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
140		__raw_writel(tcr, t->base + TCR);
141
142		/* reset counter to zero, set new period */
143		__raw_writel(0, t->base + t->tim_off);
144		__raw_writel(t->period, t->base + t->prd_off);
145
146		/* Set enable mode */
147		if (t->opts & TIMER_OPTS_ONESHOT)
148			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
149		else if (t->opts & TIMER_OPTS_PERIODIC)
150			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
151
152		__raw_writel(tcr, t->base + TCR);
153	}
154	return 0;
155}
156
157static inline u32 timer32_read(struct timer_s *t)
158{
159	return __raw_readl(t->base + t->tim_off);
160}
161
162static irqreturn_t timer_interrupt(int irq, void *dev_id)
163{
164	struct clock_event_device *evt = &clockevent_davinci;
165
166	evt->event_handler(evt);
167	return IRQ_HANDLED;
168}
169
170/* called when 32-bit counter wraps */
171static irqreturn_t freerun_interrupt(int irq, void *dev_id)
172{
173	return IRQ_HANDLED;
174}
175
176static struct timer_s timers[] = {
177	[TID_CLOCKEVENT] = {
178		.name      = "clockevent",
179		.opts      = TIMER_OPTS_DISABLED,
180		.irqaction = {
181			.flags   = IRQF_DISABLED | IRQF_TIMER,
182			.handler = timer_interrupt,
183		}
184	},
185	[TID_CLOCKSOURCE] = {
186		.name       = "free-run counter",
187		.period     = ~0,
188		.opts       = TIMER_OPTS_PERIODIC,
189		.irqaction = {
190			.flags   = IRQF_DISABLED | IRQF_TIMER,
191			.handler = freerun_interrupt,
192		}
193	},
194};
195
196static void __init timer_init(void)
197{
198	struct davinci_soc_info *soc_info = &davinci_soc_info;
199	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
200	void __iomem *base[2];
201	int i;
202
203	/* Global init of each 64-bit timer as a whole */
204	for (i = 0; i < 2; i++) {
205		u32 tgcr;
206
207		base[i] = ioremap(dtip[i].base, SZ_4K);
208		if (WARN_ON(!base[i]))
209			continue;
210
211		/* Disabled, Internal clock source */
212		__raw_writel(0, base[i] + TCR);
213
214		/* reset both timers, no pre-scaler for timer34 */
215		tgcr = 0;
216		__raw_writel(tgcr, base[i] + TGCR);
217
218		/* Set both timers to unchained 32-bit */
219		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
220		__raw_writel(tgcr, base[i] + TGCR);
221
222		/* Unreset timers */
223		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
224			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
225		__raw_writel(tgcr, base[i] + TGCR);
226
227		/* Init both counters to zero */
228		__raw_writel(0, base[i] + TIM12);
229		__raw_writel(0, base[i] + TIM34);
230	}
231
232	/* Init of each timer as a 32-bit timer */
233	for (i = 0; i < ARRAY_SIZE(timers); i++) {
234		struct timer_s *t = &timers[i];
235		int timer = ID_TO_TIMER(t->id);
236		u32 irq;
237
238		t->base = base[timer];
239		if (!t->base)
240			continue;
241
242		if (IS_TIMER_BOT(t->id)) {
243			t->enamode_shift = 6;
244			t->tim_off = TIM12;
245			t->prd_off = PRD12;
246			irq = dtip[timer].bottom_irq;
247		} else {
248			t->enamode_shift = 22;
249			t->tim_off = TIM34;
250			t->prd_off = PRD34;
251			irq = dtip[timer].top_irq;
252		}
253
254		/* Register interrupt */
255		t->irqaction.name = t->name;
256		t->irqaction.dev_id = (void *)t;
257
258		if (t->irqaction.handler != NULL) {
259			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
260			setup_irq(irq, &t->irqaction);
261		}
262	}
263}
264
265/*
266 * clocksource
267 */
268static cycle_t read_cycles(struct clocksource *cs)
269{
270	struct timer_s *t = &timers[TID_CLOCKSOURCE];
271
272	return (cycles_t)timer32_read(t);
273}
274
275/*
276 * Kernel assumes that sched_clock can be called early but may not have
277 * things ready yet.
278 */
279static cycle_t read_dummy(struct clocksource *cs)
280{
281	return 0;
282}
283
284
285static struct clocksource clocksource_davinci = {
286	.rating		= 300,
287	.read		= read_dummy,
288	.mask		= CLOCKSOURCE_MASK(32),
289	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
290};
291
292/*
293 * Overwrite weak default sched_clock with something more precise
294 */
295unsigned long long notrace sched_clock(void)
296{
297	const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci);
298
299	return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
300				clocksource_davinci.shift);
301}
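
clocksource_cyc2ns() is a fixed-point multiply, ns = (cyc * mult) >> shift. A minimal standalone sketch of the same arithmetic; the mult/shift values are assumed for illustration only, not what clocksource_register_hz() would actually pick:

#include <stdio.h>
#include <stdint.h>

/* same form as the kernel's clocksource_cyc2ns() */
static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* assume 24 MHz and shift = 24, so mult ~ (1e9 / 24e6) * 2^24 */
	uint64_t ns = cyc2ns(24000000, 699050667, 24);

	printf("%llu ns\n", (unsigned long long)ns);	/* ~1e9: 24M cycles = 1 s */
	return 0;
}
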
302
303/*
304 * clockevent
305 */
306static int davinci_set_next_event(unsigned long cycles,
307				  struct clock_event_device *evt)
308{
309	struct timer_s *t = &timers[TID_CLOCKEVENT];
310
311	t->period = cycles;
312	timer32_config(t);
313	return 0;
314}
315
316static void davinci_set_mode(enum clock_event_mode mode,
317			     struct clock_event_device *evt)
318{
319	struct timer_s *t = &timers[TID_CLOCKEVENT];
320
321	switch (mode) {
322	case CLOCK_EVT_MODE_PERIODIC:
323		t->period = davinci_clock_tick_rate / (HZ);
324		t->opts &= ~TIMER_OPTS_STATE_MASK;
325		t->opts |= TIMER_OPTS_PERIODIC;
326		timer32_config(t);
327		break;
328	case CLOCK_EVT_MODE_ONESHOT:
329		t->opts &= ~TIMER_OPTS_STATE_MASK;
330		t->opts |= TIMER_OPTS_ONESHOT;
331		break;
332	case CLOCK_EVT_MODE_UNUSED:
333	case CLOCK_EVT_MODE_SHUTDOWN:
334		t->opts &= ~TIMER_OPTS_STATE_MASK;
335		t->opts |= TIMER_OPTS_DISABLED;
336		break;
337	case CLOCK_EVT_MODE_RESUME:
338		break;
339	}
340}
341
342static struct clock_event_device clockevent_davinci = {
343	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
344	.shift		= 32,
345	.set_next_event	= davinci_set_next_event,
346	.set_mode	= davinci_set_mode,
347};
348
349
350static void __init davinci_timer_init(void)
351{
352	struct clk *timer_clk;
353	struct davinci_soc_info *soc_info = &davinci_soc_info;
354	unsigned int clockevent_id;
355	unsigned int clocksource_id;
356	static char err[] __initdata = KERN_ERR
357		"%s: can't register clocksource!\n";
358	int i;
359
360	clockevent_id = soc_info->timer_info->clockevent_id;
361	clocksource_id = soc_info->timer_info->clocksource_id;
362
363	timers[TID_CLOCKEVENT].id = clockevent_id;
364	timers[TID_CLOCKSOURCE].id = clocksource_id;
365
366	/*
367	 * If using same timer for both clock events & clocksource,
368	 * a compare register must be used to generate an event interrupt.
369	 * This is equivalent to a oneshot timer only (not periodic).
370	 */
371	if (clockevent_id == clocksource_id) {
372		struct davinci_timer_instance *dtip =
373				soc_info->timer_info->timers;
374		int event_timer = ID_TO_TIMER(clockevent_id);
375
376		/* Only bottom timers can use compare regs */
377		if (IS_TIMER_TOP(clockevent_id))
378			pr_warning("davinci_timer_init: Invalid use"
379				" of system timers.  Results unpredictable.\n");
380		else if ((dtip[event_timer].cmp_off == 0)
381				|| (dtip[event_timer].cmp_irq == 0))
382			pr_warning("davinci_timer_init:  Invalid timer instance"
383				" setup.  Results unpredictable.\n");
384		else {
385			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
386			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
387		}
388	}
389
390	timer_clk = clk_get(NULL, "timer0");
391	BUG_ON(IS_ERR(timer_clk));
392	clk_enable(timer_clk);
393
394	/* init timer hw */
395	timer_init();
396
397	davinci_clock_tick_rate = clk_get_rate(timer_clk);
398
399	/* setup clocksource */
400	clocksource_davinci.read = read_cycles;
401	clocksource_davinci.name = id_to_name[clocksource_id];
402	if (clocksource_register_hz(&clocksource_davinci,
403				    davinci_clock_tick_rate))
404		printk(err, clocksource_davinci.name);
405
406	/* setup clockevent */
407	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
408	clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
409					 clockevent_davinci.shift);
410	clockevent_davinci.max_delta_ns =
411		clockevent_delta2ns(0xfffffffe, &clockevent_davinci);
412	clockevent_davinci.min_delta_ns = 50000; /* 50 usec */
413
414	clockevent_davinci.cpumask = cpumask_of(0);
415	clockevents_register_device(&clockevent_davinci);
416
417	for (i = 0; i < ARRAY_SIZE(timers); i++)
418		timer32_config(&timers[i]);
419}
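
The mult/shift pair set up just above converts cycles to nanoseconds for the clockevent layer: div_sc() computes mult = (rate << shift) / NSEC_PER_SEC, and clockevent_delta2ns() is roughly (delta << shift) / mult. A minimal sketch of that arithmetic, assuming a 24 MHz timer clock purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate  = 24000000;	/* assumed timer clock in Hz */
	unsigned shift = 32;
	uint64_t mult  = (rate << shift) / 1000000000ULL;	/* div_sc() form */
	uint64_t max_ns = ((uint64_t)0xfffffffe << shift) / mult;

	printf("mult = %llu\n", (unsigned long long)mult);	     /* ~103079215 */
	printf("max_delta_ns ~ %llu\n", (unsigned long long)max_ns); /* ~1.79e11 ns, i.e. ~179 s */
	return 0;
}
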
420
421struct sys_timer davinci_timer = {
422	.init   = davinci_timer_init,
423};
424
425
426/* reset board using watchdog timer */
427void davinci_watchdog_reset(struct platform_device *pdev)
428{
429	u32 tgcr, wdtcr;
430	void __iomem *base;
431	struct clk *wd_clk;
432
433	base = ioremap(pdev->resource[0].start, SZ_4K);
434	if (WARN_ON(!base))
435		return;
436
437	wd_clk = clk_get(&pdev->dev, NULL);
438	if (WARN_ON(IS_ERR(wd_clk)))
439		return;
440	clk_enable(wd_clk);
441
442	/* disable, internal clock source */
443	__raw_writel(0, base + TCR);
444
445	/* reset timer, set mode to 64-bit watchdog, and unreset */
446	tgcr = 0;
447	__raw_writel(tgcr, base + TGCR);
448	tgcr = TGCR_TIMMODE_64BIT_WDOG << TGCR_TIMMODE_SHIFT;
449	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
450		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
451	__raw_writel(tgcr, base + TGCR);
452
453	/* clear counter and period regs */
454	__raw_writel(0, base + TIM12);
455	__raw_writel(0, base + TIM34);
456	__raw_writel(0, base + PRD12);
457	__raw_writel(0, base + PRD34);
458
459	/* put watchdog in pre-active state */
460	wdtcr = __raw_readl(base + WDTCR);
461	wdtcr = (WDTCR_WDKEY_SEQ0 << WDTCR_WDKEY_SHIFT) |
462		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
463	__raw_writel(wdtcr, base + WDTCR);
464
465	/* put watchdog in active state */
466	wdtcr = (WDTCR_WDKEY_SEQ1 << WDTCR_WDKEY_SHIFT) |
467		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
468	__raw_writel(wdtcr, base + WDTCR);
469
470	/* write an invalid value to the WDKEY field to trigger
471	 * a watchdog reset */
472	wdtcr = 0x00004000;
473	__raw_writel(wdtcr, base + WDTCR);
474}
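
How this helper gets invoked is outside this file; the fragment below is only a hypothetical sketch of a reset hook wired to it, and the davinci_wdt_device pointer name is a placeholder rather than a reference to real code.

#include <linux/platform_device.h>

/* placeholder: whatever platform_device describes the watchdog timer */
extern struct platform_device davinci_wdt_device;

/* hypothetical reset hook calling into davinci_watchdog_reset() above */
static void example_arch_reset(char mode, const char *cmd)
{
	davinci_watchdog_reset(&davinci_wdt_device);
}
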