1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * MPIC timer driver
4 *
5 * Copyright 2013 Freescale Semiconductor, Inc.
6 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
7 * Li Yang <leoli@freescale.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/interrupt.h>
16#include <linux/slab.h>
17#include <linux/of.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20#include <linux/syscore_ops.h>
21#include <sysdev/fsl_soc.h>
22#include <asm/io.h>
23
24#include <asm/mpic_timer.h>
25
26#define FSL_GLOBAL_TIMER 0x1
27
28/* Clock Ratio
29 * Divide by 64 0x00000300
30 * Divide by 32 0x00000200
31 * Divide by 16 0x00000100
32 * Divide by 8 0x00000000 (Hardware default div)
33 */
34#define MPIC_TIMER_TCR_CLKDIV 0x00000300
35
36#define MPIC_TIMER_TCR_ROVR_OFFSET 24
37
38#define TIMER_STOP 0x80000000
39#define GTCCR_TOG 0x80000000
40#define TIMERS_PER_GROUP 4
41#define MAX_TICKS (~0U >> 1)
42#define MAX_TICKS_CASCADE (~0U)
43#define TIMER_OFFSET(num) (1 << (TIMERS_PER_GROUP - 1 - num))
44
/*
 * Register layout of one MPIC global timer (big-endian).  Each 32-bit
 * register is followed by three reserved words, i.e. a 16-byte stride.
 */
struct timer_regs {
	u32 gtccr;	/* Global Timer Current Count Register */
	u32 res0[3];
	u32 gtbcr;	/* Global Timer Base Count Register */
	u32 res1[3];
	u32 gtvpr;	/* Global Timer Vector/Priority Register */
	u32 res2[3];
	u32 gtdr;	/* Global Timer Destination Register */
	u32 res3[3];
};

/* One cascade (chained timer pair) configuration. */
struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* idle-bitmap mask of both timers */
	unsigned int timer_num;		/* cascade control timer */
};

/* Per device-tree node state for one group of four global timers. */
struct timer_group_priv {
	struct timer_regs __iomem *regs;	/* mapped timer registers */
	struct mpic_timer timer[TIMERS_PER_GROUP];
	struct list_head node;		/* link in timer_group_list */
	unsigned int timerfreq;		/* tick rate after clock divider */
	unsigned int idle;		/* free-timer bitmap, see TIMER_OFFSET() */
	unsigned int flags;		/* FSL_GLOBAL_TIMER */
	spinlock_t lock;		/* protects idle and cascade_handle */
	void __iomem *group_tcr;	/* group timer control register */
};

/*
 * Possible cascade pairings.  tcr_value selects the cascade mode bits
 * written to the group TCR, cascade_map is the idle-bitmap mask of the
 * two timers consumed, timer_num is the controlling (upper) timer.
 */
static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};
81
82static LIST_HEAD(timer_group_list);
83
84static void convert_ticks_to_time(struct timer_group_priv *priv,
85 const u64 ticks, time64_t *time)
86{
87 *time = (u64)div_u64(ticks, priv->timerfreq);
88}
89
90/* the time set by the user is converted to "ticks" */
91static int convert_time_to_ticks(struct timer_group_priv *priv,
92 time64_t time, u64 *ticks)
93{
94 u64 max_value; /* prevent u64 overflow */
95
96 max_value = div_u64(ULLONG_MAX, priv->timerfreq);
97
98 if (time > max_value)
99 return -EINVAL;
100
101 *ticks = (u64)time * (u64)priv->timerfreq;
102
103 return 0;
104}
105
106/* detect whether there is a cascade timer available */
107static struct mpic_timer *detect_idle_cascade_timer(
108 struct timer_group_priv *priv)
109{
110 struct cascade_priv *casc_priv;
111 unsigned int map;
112 unsigned int array_size = ARRAY_SIZE(cascade_timer);
113 unsigned int num;
114 unsigned int i;
115 unsigned long flags;
116
117 casc_priv = cascade_timer;
118 for (i = 0; i < array_size; i++) {
119 spin_lock_irqsave(&priv->lock, flags);
120 map = casc_priv->cascade_map & priv->idle;
121 if (map == casc_priv->cascade_map) {
122 num = casc_priv->timer_num;
123 priv->timer[num].cascade_handle = casc_priv;
124
125 /* set timer busy */
126 priv->idle &= ~casc_priv->cascade_map;
127 spin_unlock_irqrestore(&priv->lock, flags);
128 return &priv->timer[num];
129 }
130 spin_unlock_irqrestore(&priv->lock, flags);
131 casc_priv++;
132 }
133
134 return NULL;
135}
136
/*
 * Load a tick count into an already-claimed cascade pair.  The quotient
 * of ticks / MAX_TICKS_CASCADE goes into the control timer (held
 * stopped via TIMER_STOP), the remainder into the lower timer.
 * Returns -EINVAL when timer 'num' has no cascade_handle attached.
 */
static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	/* control timer: clear current count, load quotient, keep stopped */
	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	/* lower timer of the pair takes the remainder */
	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}
164
165static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
166 u64 ticks)
167{
168 struct mpic_timer *allocated_timer;
169
170 /* Two cascade timers: Support the maximum time */
171 const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
172 int ret;
173
174 if (ticks > max_ticks)
175 return NULL;
176
177 /* detect idle timer */
178 allocated_timer = detect_idle_cascade_timer(priv);
179 if (!allocated_timer)
180 return NULL;
181
182 /* set ticks to timer */
183 ret = set_cascade_timer(priv, ticks, allocated_timer->num);
184 if (ret < 0)
185 return NULL;
186
187 return allocated_timer;
188}
189
/*
 * Allocate an idle hardware timer able to count 'time' seconds and load
 * it (stopped) with the corresponding tick count.
 *
 * Walks every registered group: a single timer is used when the tick
 * count fits in 31 bits (MAX_TICKS), otherwise a cascaded pair is tried
 * (FSL global timer groups only).  Returns NULL when no timer fits.
 */
static struct mpic_timer *get_timer(time64_t time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			/* idle bit i maps to timer num, cf. TIMER_OFFSET() */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
						ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}
238
239/**
240 * mpic_start_timer - start hardware timer
241 * @handle: the timer to be started.
242 *
243 * It will do ->fn(->dev) callback from the hardware interrupt at
244 * the 'time64_t' point in the future.
245 */
246void mpic_start_timer(struct mpic_timer *handle)
247{
248 struct timer_group_priv *priv = container_of(handle,
249 struct timer_group_priv, timer[handle->num]);
250
251 clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
252}
253EXPORT_SYMBOL(mpic_start_timer);
254
255/**
256 * mpic_stop_timer - stop hardware timer
257 * @handle: the timer to be stopped
258 *
259 * The timer periodically generates an interrupt. Unless user stops the timer.
260 */
261void mpic_stop_timer(struct mpic_timer *handle)
262{
263 struct timer_group_priv *priv = container_of(handle,
264 struct timer_group_priv, timer[handle->num]);
265 struct cascade_priv *casc_priv;
266
267 setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
268
269 casc_priv = priv->timer[handle->num].cascade_handle;
270 if (casc_priv) {
271 out_be32(&priv->regs[handle->num].gtccr, 0);
272 out_be32(&priv->regs[handle->num - 1].gtccr, 0);
273 } else {
274 out_be32(&priv->regs[handle->num].gtccr, 0);
275 }
276}
277EXPORT_SYMBOL(mpic_stop_timer);
278
279/**
280 * mpic_get_remain_time - get timer time
281 * @handle: the timer to be selected.
282 * @time: time for timer
283 *
284 * Query timer remaining time.
285 */
286void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
287{
288 struct timer_group_priv *priv = container_of(handle,
289 struct timer_group_priv, timer[handle->num]);
290 struct cascade_priv *casc_priv;
291
292 u64 ticks;
293 u32 tmp_ticks;
294
295 casc_priv = priv->timer[handle->num].cascade_handle;
296 if (casc_priv) {
297 tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
298 tmp_ticks &= ~GTCCR_TOG;
299 ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
300 tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
301 ticks += tmp_ticks;
302 } else {
303 ticks = in_be32(&priv->regs[handle->num].gtccr);
304 ticks &= ~GTCCR_TOG;
305 }
306
307 convert_ticks_to_time(priv, ticks, time);
308}
309EXPORT_SYMBOL(mpic_get_remain_time);
310
311/**
312 * mpic_free_timer - free hardware timer
313 * @handle: the timer to be removed.
314 *
315 * Free the timer.
316 *
317 * Note: can not be used in interrupt context.
318 */
319void mpic_free_timer(struct mpic_timer *handle)
320{
321 struct timer_group_priv *priv = container_of(handle,
322 struct timer_group_priv, timer[handle->num]);
323
324 struct cascade_priv *casc_priv;
325 unsigned long flags;
326
327 mpic_stop_timer(handle);
328
329 casc_priv = priv->timer[handle->num].cascade_handle;
330
331 free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);
332
333 spin_lock_irqsave(&priv->lock, flags);
334 if (casc_priv) {
335 u32 tcr;
336 tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
337 MPIC_TIMER_TCR_ROVR_OFFSET);
338 clrbits32(priv->group_tcr, tcr);
339 priv->idle |= casc_priv->cascade_map;
340 priv->timer[handle->num].cascade_handle = NULL;
341 } else {
342 priv->idle |= TIMER_OFFSET(handle->num);
343 }
344 spin_unlock_irqrestore(&priv->lock, flags);
345}
346EXPORT_SYMBOL(mpic_free_timer);
347
348/**
349 * mpic_request_timer - get a hardware timer
350 * @fn: interrupt handler function
351 * @dev: callback function of the data
352 * @time: time for timer
353 *
354 * This executes the "request_irq", returning NULL
355 * else "handle" on success.
356 */
357struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
358 time64_t time)
359{
360 struct mpic_timer *allocated_timer;
361 int ret;
362
363 if (list_empty(&timer_group_list))
364 return NULL;
365
366 if (time < 0)
367 return NULL;
368
369 allocated_timer = get_timer(time);
370 if (!allocated_timer)
371 return NULL;
372
373 ret = request_irq(allocated_timer->irq, fn,
374 IRQF_TRIGGER_LOW, "global-timer", dev);
375 if (ret) {
376 mpic_free_timer(allocated_timer);
377 return NULL;
378 }
379
380 allocated_timer->dev = dev;
381
382 return allocated_timer;
383}
384EXPORT_SYMBOL(mpic_request_timer);
385
386static int __init timer_group_get_freq(struct device_node *np,
387 struct timer_group_priv *priv)
388{
389 u32 div;
390
391 if (priv->flags & FSL_GLOBAL_TIMER) {
392 struct device_node *dn;
393
394 dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
395 if (dn) {
396 of_property_read_u32(dn, "clock-frequency",
397 &priv->timerfreq);
398 of_node_put(dn);
399 }
400 }
401
402 if (priv->timerfreq <= 0)
403 return -EINVAL;
404
405 if (priv->flags & FSL_GLOBAL_TIMER) {
406 div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
407 priv->timerfreq /= div;
408 }
409
410 return 0;
411}
412
/*
 * Map the interrupt of every available timer in the group and mark
 * those timers idle.  The optional "fsl,available-ranges" property is a
 * list of <offset count> pairs limiting which timers may be used; when
 * absent all TIMERS_PER_GROUP timers are available.
 * Returns 0 on success, -EINVAL on a malformed property or mapping
 * failure.
 */
static int __init timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

	p = of_get_property(np, "fsl,available-ranges", &len);
	/* property length must be a whole number of <offset count> pairs */
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	/* convert byte length to number of pairs */
	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}
460
/*
 * Set up one timer group from its device-tree node: map registers,
 * determine the tick frequency, map interrupts, program the clock
 * divider and add the group to timer_group_list.  On any failure the
 * partially-initialised group is torn down and the node is skipped.
 */
static void __init timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	/* reg index 0: timer register block */
	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	/* reg index 1 (FSL global timer only): group TCR */
	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	/* unwind partial setup; iounmap(NULL) is avoided by the checks */
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}
521
522static void mpic_timer_resume(void)
523{
524 struct timer_group_priv *priv;
525
526 list_for_each_entry(priv, &timer_group_list, node) {
527 /* Init FSL timer hardware */
528 if (priv->flags & FSL_GLOBAL_TIMER)
529 setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
530 }
531}
532
/* Device-tree match table: one timer group per compatible node. */
static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

/* Only a resume hook is implemented; state needs no save on suspend. */
static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};
541
542static int __init mpic_timer_init(void)
543{
544 struct device_node *np = NULL;
545
546 for_each_matching_node(np, mpic_timer_ids)
547 timer_group_init(np);
548
549 register_syscore_ops(&mpic_timer_syscore_ops);
550
551 if (list_empty(&timer_group_list))
552 return -ENODEV;
553
554 return 0;
555}
556subsys_initcall(mpic_timer_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * MPIC timer driver
4 *
5 * Copyright 2013 Freescale Semiconductor, Inc.
6 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
7 * Li Yang <leoli@freescale.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/interrupt.h>
16#include <linux/slab.h>
17#include <linux/of.h>
18#include <linux/of_address.h>
19#include <linux/of_device.h>
20#include <linux/of_irq.h>
21#include <linux/syscore_ops.h>
22#include <sysdev/fsl_soc.h>
23#include <asm/io.h>
24
25#include <asm/mpic_timer.h>
26
27#define FSL_GLOBAL_TIMER 0x1
28
29/* Clock Ratio
30 * Divide by 64 0x00000300
31 * Divide by 32 0x00000200
32 * Divide by 16 0x00000100
33 * Divide by 8 0x00000000 (Hardware default div)
34 */
35#define MPIC_TIMER_TCR_CLKDIV 0x00000300
36
37#define MPIC_TIMER_TCR_ROVR_OFFSET 24
38
39#define TIMER_STOP 0x80000000
40#define GTCCR_TOG 0x80000000
41#define TIMERS_PER_GROUP 4
42#define MAX_TICKS (~0U >> 1)
43#define MAX_TICKS_CASCADE (~0U)
44#define TIMER_OFFSET(num) (1 << (TIMERS_PER_GROUP - 1 - num))
45
/*
 * One MPIC global timer's registers (big-endian); the reserved words
 * give each 32-bit register a 16-byte stride.
 */
struct timer_regs {
	u32 gtccr;	/* current count */
	u32 res0[3];
	u32 gtbcr;	/* base count */
	u32 res1[3];
	u32 gtvpr;	/* vector/priority */
	u32 res2[3];
	u32 gtdr;	/* destination */
	u32 res3[3];
};

/* Description of one cascaded (paired) timer configuration. */
struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* idle-bitmap mask of the pair */
	unsigned int timer_num;		/* cascade control timer */
};

/* State for one group of four global timers (one DT node). */
struct timer_group_priv {
	struct timer_regs __iomem *regs;	/* mapped register block */
	struct mpic_timer timer[TIMERS_PER_GROUP];
	struct list_head node;		/* entry in timer_group_list */
	unsigned int timerfreq;		/* effective tick frequency */
	unsigned int idle;		/* idle bitmap, TIMER_OFFSET() bits */
	unsigned int flags;		/* FSL_GLOBAL_TIMER */
	spinlock_t lock;		/* guards idle / cascade_handle */
	void __iomem *group_tcr;	/* group timer control register */
};

/*
 * All legal cascade pairings: {TCR cascade bits, idle-bitmap mask of
 * the two timers used, number of the controlling timer}.
 */
static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};
82
83static LIST_HEAD(timer_group_list);
84
/* Convert a hardware tick count to whole seconds via the group rate. */
static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, time64_t *time)
{
	*time = (u64)div_u64(ticks, priv->timerfreq);
}
90
/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		time64_t time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */

	/* largest second count whose tick product still fits in a u64 */
	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time > max_value)
		return -EINVAL;

	*ticks = (u64)time * (u64)priv->timerfreq;

	return 0;
}
106
/*
 * Detect whether there is a cascade timer available: scan the pairings
 * in cascade_timer[], and the first pairing whose two timers are both
 * idle is claimed (under the group lock) and its control timer
 * returned.  NULL when no pairing is free.
 */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy (both timers of the pair) */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}
137
/*
 * Program a claimed cascade pair: the quotient of
 * ticks / MAX_TICKS_CASCADE is loaded into the control timer (kept
 * stopped), the remainder into the lower timer.  -EINVAL when no
 * cascade_handle is attached to timer 'num'.
 */
static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	/* control timer: reset count, load quotient, hold stopped */
	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	/* lower timer takes the remainder */
	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}
165
/*
 * Allocate and program a cascade pair for 'ticks'.  NULL when the
 * count exceeds two-timer capacity, no pair is idle, or programming
 * fails.
 */
static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
		u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}
190
/*
 * Pick an idle timer able to count 'time' seconds and load it (kept
 * stopped).  Single timers serve counts up to MAX_TICKS (31 bits);
 * larger counts need a cascade pair, available only on FSL global
 * timer groups.  Returns NULL when nothing suitable is idle.
 */
static struct mpic_timer *get_timer(time64_t time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			/* idle bit i corresponds to timer num (TIMER_OFFSET) */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
						ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}
239
240/**
241 * mpic_start_timer - start hardware timer
242 * @handle: the timer to be started.
243 *
244 * It will do ->fn(->dev) callback from the hardware interrupt at
245 * the 'time64_t' point in the future.
246 */
247void mpic_start_timer(struct mpic_timer *handle)
248{
249 struct timer_group_priv *priv = container_of(handle,
250 struct timer_group_priv, timer[handle->num]);
251
252 clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
253}
254EXPORT_SYMBOL(mpic_start_timer);
255
256/**
257 * mpic_stop_timer - stop hardware timer
258 * @handle: the timer to be stoped
259 *
260 * The timer periodically generates an interrupt. Unless user stops the timer.
261 */
262void mpic_stop_timer(struct mpic_timer *handle)
263{
264 struct timer_group_priv *priv = container_of(handle,
265 struct timer_group_priv, timer[handle->num]);
266 struct cascade_priv *casc_priv;
267
268 setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
269
270 casc_priv = priv->timer[handle->num].cascade_handle;
271 if (casc_priv) {
272 out_be32(&priv->regs[handle->num].gtccr, 0);
273 out_be32(&priv->regs[handle->num - 1].gtccr, 0);
274 } else {
275 out_be32(&priv->regs[handle->num].gtccr, 0);
276 }
277}
278EXPORT_SYMBOL(mpic_stop_timer);
279
280/**
281 * mpic_get_remain_time - get timer time
282 * @handle: the timer to be selected.
283 * @time: time for timer
284 *
285 * Query timer remaining time.
286 */
287void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
288{
289 struct timer_group_priv *priv = container_of(handle,
290 struct timer_group_priv, timer[handle->num]);
291 struct cascade_priv *casc_priv;
292
293 u64 ticks;
294 u32 tmp_ticks;
295
296 casc_priv = priv->timer[handle->num].cascade_handle;
297 if (casc_priv) {
298 tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
299 tmp_ticks &= ~GTCCR_TOG;
300 ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
301 tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
302 ticks += tmp_ticks;
303 } else {
304 ticks = in_be32(&priv->regs[handle->num].gtccr);
305 ticks &= ~GTCCR_TOG;
306 }
307
308 convert_ticks_to_time(priv, ticks, time);
309}
310EXPORT_SYMBOL(mpic_get_remain_time);
311
312/**
313 * mpic_free_timer - free hardware timer
314 * @handle: the timer to be removed.
315 *
316 * Free the timer.
317 *
318 * Note: can not be used in interrupt context.
319 */
320void mpic_free_timer(struct mpic_timer *handle)
321{
322 struct timer_group_priv *priv = container_of(handle,
323 struct timer_group_priv, timer[handle->num]);
324
325 struct cascade_priv *casc_priv;
326 unsigned long flags;
327
328 mpic_stop_timer(handle);
329
330 casc_priv = priv->timer[handle->num].cascade_handle;
331
332 free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);
333
334 spin_lock_irqsave(&priv->lock, flags);
335 if (casc_priv) {
336 u32 tcr;
337 tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
338 MPIC_TIMER_TCR_ROVR_OFFSET);
339 clrbits32(priv->group_tcr, tcr);
340 priv->idle |= casc_priv->cascade_map;
341 priv->timer[handle->num].cascade_handle = NULL;
342 } else {
343 priv->idle |= TIMER_OFFSET(handle->num);
344 }
345 spin_unlock_irqrestore(&priv->lock, flags);
346}
347EXPORT_SYMBOL(mpic_free_timer);
348
349/**
350 * mpic_request_timer - get a hardware timer
351 * @fn: interrupt handler function
352 * @dev: callback function of the data
353 * @time: time for timer
354 *
355 * This executes the "request_irq", returning NULL
356 * else "handle" on success.
357 */
358struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
359 time64_t time)
360{
361 struct mpic_timer *allocated_timer;
362 int ret;
363
364 if (list_empty(&timer_group_list))
365 return NULL;
366
367 if (time < 0)
368 return NULL;
369
370 allocated_timer = get_timer(time);
371 if (!allocated_timer)
372 return NULL;
373
374 ret = request_irq(allocated_timer->irq, fn,
375 IRQF_TRIGGER_LOW, "global-timer", dev);
376 if (ret) {
377 mpic_free_timer(allocated_timer);
378 return NULL;
379 }
380
381 allocated_timer->dev = dev;
382
383 return allocated_timer;
384}
385EXPORT_SYMBOL(mpic_request_timer);
386
387static int timer_group_get_freq(struct device_node *np,
388 struct timer_group_priv *priv)
389{
390 u32 div;
391
392 if (priv->flags & FSL_GLOBAL_TIMER) {
393 struct device_node *dn;
394
395 dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
396 if (dn) {
397 of_property_read_u32(dn, "clock-frequency",
398 &priv->timerfreq);
399 of_node_put(dn);
400 }
401 }
402
403 if (priv->timerfreq <= 0)
404 return -EINVAL;
405
406 if (priv->flags & FSL_GLOBAL_TIMER) {
407 div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
408 priv->timerfreq /= div;
409 }
410
411 return 0;
412}
413
/*
 * Map every available timer's interrupt and mark it idle.  The
 * optional "fsl,available-ranges" property lists <offset count> pairs
 * restricting which timers may be used; without it all
 * TIMERS_PER_GROUP timers are taken.  Returns 0 or -EINVAL.
 */
static int timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

	p = of_get_property(np, "fsl,available-ranges", &len);
	/* must be a whole number of <offset count> cells */
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	/* byte length -> number of pairs */
	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}
461
/*
 * Initialise one timer group from its DT node: map the register block
 * (and, for FSL global timers, the group TCR), read the frequency, map
 * the interrupts, apply the clock divider, then add the group to
 * timer_group_list.  Any failure tears the partial setup down and the
 * node is skipped.
 */
static void timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	/* first reg entry: the timer register block */
	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	/* second reg entry (FSL global timer only): group TCR */
	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	/* unwind: the NULL checks keep us from unmapping nothing */
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}
522
/* syscore resume hook: restore the clock divider in every group TCR. */
static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}
533
/* DT compatibles handled by this driver. */
static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

/* Power-management hooks: only resume is implemented. */
static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};
542
543static int __init mpic_timer_init(void)
544{
545 struct device_node *np = NULL;
546
547 for_each_matching_node(np, mpic_timer_ids)
548 timer_group_init(np);
549
550 register_syscore_ops(&mpic_timer_syscore_ops);
551
552 if (list_empty(&timer_group_list))
553 return -ENODEV;
554
555 return 0;
556}
557subsys_initcall(mpic_timer_init);