// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay (shane@minirl.com)
// Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
// Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC SoCs:
 * - MX1/MXL
 * - MX21, MX27
 * - MX25, MX31, MX35, MX37, MX51, MX6Q (rev 1.0)
 * - MX6DL, MX6SX, MX6Q (rev 1.1+)
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

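/* Rate of the 24 MHz oscillator after the fixed /8 divider */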
#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
};

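/*
 * Per-SoC register offsets and ops: _mxc_timer_init() selects one of
 * the imx*_gpt_data instances below, so a single driver covers all
 * four GPT revisions listed above.
 */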
struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
		       imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

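/*
 * The free-running GPT counter doubles as the scheduler clock and, on
 * ARM, as the source for the timer-based delay loop below.
 */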
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}
#endif

static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);
#endif

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
				     clocksource_mmio_readl_up);
}

/* clock event */

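/*
 * Program the compare register relative to the current counter value.
 * If the counter has already run past the new compare point by the
 * time it is re-read, return -ETIME so the core retries with a larger
 * delta.
 */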
static int mx1_2_set_next_event(unsigned long evt,
				struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

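/*
 * Same scheme as above, but a passed compare value only counts as a
 * missed event when the requested delta is smaller than 0x7fffffff;
 * for larger deltas the signed comparison goes negative even though
 * the match is still ahead of the counter.
 */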
static int v2_set_next_event(unsigned long evt,
			     struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

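/*
 * Writing TCN - 3 to the compare register pushes the next match almost
 * a full 32-bit counter wrap into the future, parking the timer without
 * stopping the counter itself.
 */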
static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tcn;

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put the overhead of interrupt enable/disable into
	 * mxc_set_next_event(); after the mode switch the core has about
	 * 4 minutes to call mxc_set_next_event() or shut the clock down.
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	uint32_t tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	return request_irq(imxtm->irq, mxc_timer_interrupt,
			   IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}

static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

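/*
 * On i.MX6DL and later the GPT can be clocked straight from the 24 MHz
 * crystal: V2_TCTL_24MEN gates that clock in, and the PRE24M prescaler
 * field in TPRER divides it by (value + 1), so 7 gives the 3 MHz
 * OSC_DIV8 rate.
 */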
static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx21_gpt_irq_enable,
	.gpt_irq_disable = imx21_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx21_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx6dl_gpt_irq_enable,
	.gpt_irq_disable = imx6dl_gpt_irq_disable,
	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

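/*
 * Legacy (non-DT) entry point: board code passes the physical base
 * address, IRQ and GPT type directly.  Device-tree platforms probe via
 * mxc_timer_init_dt() below instead.
 */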
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	BUG_ON(!imxtm);

	imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
	imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	imxtm->base = ioremap(pbase, SZ_4K);
	BUG_ON(!imxtm->base);

	imxtm->type = type;
	imxtm->irq = irq;

	_mxc_timer_init(imxtm);
}

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * The i.MX6Q/D and i.MX6DL/S GPT blocks used to share this
	 * compatible string even though they have different programming
	 * models.  This workaround keeps existing i.MX6DL/S DTBs working
	 * with newer kernels.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);