v6.8 (MPIC timer driver, arch/powerpc/sysdev/mpic_timer.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * MPIC timer driver
  4 *
  5 * Copyright 2013 Freescale Semiconductor, Inc.
  6 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
  7 *	   Li Yang <leoli@freescale.com>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/errno.h>
 14#include <linux/mm.h>
 15#include <linux/interrupt.h>
 16#include <linux/slab.h>
 17#include <linux/of.h>
  18#include <linux/of_address.h>
 19#include <linux/of_irq.h>
 20#include <linux/syscore_ops.h>
 21#include <sysdev/fsl_soc.h>
 22#include <asm/io.h>
 23
 24#include <asm/mpic_timer.h>
 25
 26#define FSL_GLOBAL_TIMER		0x1
 27
 28/* Clock Ratio
 29 * Divide by 64 0x00000300
 30 * Divide by 32 0x00000200
 31 * Divide by 16 0x00000100
 32 * Divide by  8 0x00000000 (Hardware default div)
 33 */
 34#define MPIC_TIMER_TCR_CLKDIV		0x00000300
 35
 36#define MPIC_TIMER_TCR_ROVR_OFFSET	24
 37
 38#define TIMER_STOP			0x80000000
 39#define GTCCR_TOG			0x80000000
 40#define TIMERS_PER_GROUP		4
 41#define MAX_TICKS			(~0U >> 1)
 42#define MAX_TICKS_CASCADE		(~0U)
 43#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))
  44
 45struct timer_regs {
 46	u32	gtccr;
 47	u32	res0[3];
 48	u32	gtbcr;
 49	u32	res1[3];
 50	u32	gtvpr;
 51	u32	res2[3];
 52	u32	gtdr;
 53	u32	res3[3];
 54};
 55
 56struct cascade_priv {
 57	u32 tcr_value;			/* TCR register: CASC & ROVR value */
 58	unsigned int cascade_map;	/* cascade map */
 59	unsigned int timer_num;		/* cascade control timer */
 60};
 61
 62struct timer_group_priv {
 63	struct timer_regs __iomem	*regs;
 64	struct mpic_timer		timer[TIMERS_PER_GROUP];
 65	struct list_head		node;
 66	unsigned int			timerfreq;
 67	unsigned int			idle;
 68	unsigned int			flags;
 69	spinlock_t			lock;
 70	void __iomem			*group_tcr;
 71};
 72
 73static struct cascade_priv cascade_timer[] = {
 74	/* cascade timer 0 and 1 */
 75	{0x1, 0xc, 0x1},
 76	/* cascade timer 1 and 2 */
 77	{0x2, 0x6, 0x2},
 78	/* cascade timer 2 and 3 */
 79	{0x4, 0x3, 0x3}
 80};
 81
 82static LIST_HEAD(timer_group_list);
 83
 84static void convert_ticks_to_time(struct timer_group_priv *priv,
 85		const u64 ticks, time64_t *time)
 86{
  87	*time = (u64)div_u64(ticks, priv->timerfreq);
 88}
 89
 90/* the time set by the user is converted to "ticks" */
 91static int convert_time_to_ticks(struct timer_group_priv *priv,
 92		time64_t time, u64 *ticks)
 93{
  94	u64 max_value;		/* prevent u64 overflow */
 95
 96	max_value = div_u64(ULLONG_MAX, priv->timerfreq);
 97
  98	if (time > max_value)
 99		return -EINVAL;
100
 101	*ticks = (u64)time * (u64)priv->timerfreq;
102
103	return 0;
104}
105
106/* detect whether there is a cascade timer available */
107static struct mpic_timer *detect_idle_cascade_timer(
108					struct timer_group_priv *priv)
109{
110	struct cascade_priv *casc_priv;
111	unsigned int map;
112	unsigned int array_size = ARRAY_SIZE(cascade_timer);
113	unsigned int num;
114	unsigned int i;
115	unsigned long flags;
116
117	casc_priv = cascade_timer;
118	for (i = 0; i < array_size; i++) {
119		spin_lock_irqsave(&priv->lock, flags);
120		map = casc_priv->cascade_map & priv->idle;
121		if (map == casc_priv->cascade_map) {
122			num = casc_priv->timer_num;
123			priv->timer[num].cascade_handle = casc_priv;
124
125			/* set timer busy */
126			priv->idle &= ~casc_priv->cascade_map;
127			spin_unlock_irqrestore(&priv->lock, flags);
128			return &priv->timer[num];
129		}
130		spin_unlock_irqrestore(&priv->lock, flags);
131		casc_priv++;
132	}
133
134	return NULL;
135}
136
137static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
138		unsigned int num)
139{
140	struct cascade_priv *casc_priv;
141	u32 tcr;
142	u32 tmp_ticks;
143	u32 rem_ticks;
144
145	/* set group tcr reg for cascade */
146	casc_priv = priv->timer[num].cascade_handle;
147	if (!casc_priv)
148		return -EINVAL;
149
150	tcr = casc_priv->tcr_value |
151		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
152	setbits32(priv->group_tcr, tcr);
153
154	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);
155
156	out_be32(&priv->regs[num].gtccr, 0);
157	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);
158
159	out_be32(&priv->regs[num - 1].gtccr, 0);
160	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);
161
162	return 0;
163}
164
165static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
166					u64 ticks)
167{
168	struct mpic_timer *allocated_timer;
169
170	/* Two cascade timers: Support the maximum time */
171	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
172	int ret;
173
174	if (ticks > max_ticks)
175		return NULL;
176
177	/* detect idle timer */
178	allocated_timer = detect_idle_cascade_timer(priv);
179	if (!allocated_timer)
180		return NULL;
181
182	/* set ticks to timer */
183	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
184	if (ret < 0)
185		return NULL;
186
187	return allocated_timer;
188}
189
190static struct mpic_timer *get_timer(time64_t time)
191{
192	struct timer_group_priv *priv;
193	struct mpic_timer *timer;
194
195	u64 ticks;
196	unsigned int num;
197	unsigned int i;
198	unsigned long flags;
199	int ret;
200
201	list_for_each_entry(priv, &timer_group_list, node) {
202		ret = convert_time_to_ticks(priv, time, &ticks);
203		if (ret < 0)
204			return NULL;
205
206		if (ticks > MAX_TICKS) {
207			if (!(priv->flags & FSL_GLOBAL_TIMER))
208				return NULL;
209
210			timer = get_cascade_timer(priv, ticks);
211			if (!timer)
212				continue;
213
214			return timer;
215		}
216
217		for (i = 0; i < TIMERS_PER_GROUP; i++) {
218			/* one timer: Reverse allocation */
219			num = TIMERS_PER_GROUP - 1 - i;
220			spin_lock_irqsave(&priv->lock, flags);
221			if (priv->idle & (1 << i)) {
222				/* set timer busy */
223				priv->idle &= ~(1 << i);
224				/* set ticks & stop timer */
225				out_be32(&priv->regs[num].gtbcr,
226					ticks | TIMER_STOP);
227				out_be32(&priv->regs[num].gtccr, 0);
228				priv->timer[num].cascade_handle = NULL;
229				spin_unlock_irqrestore(&priv->lock, flags);
230				return &priv->timer[num];
231			}
232			spin_unlock_irqrestore(&priv->lock, flags);
233		}
234	}
235
236	return NULL;
237}
238
239/**
240 * mpic_start_timer - start hardware timer
241 * @handle: the timer to be started.
242 *
 243 * It will invoke the ->fn(->dev) callback from the hardware interrupt
 244 * once the requested 'time64_t' interval has elapsed.
245 */
246void mpic_start_timer(struct mpic_timer *handle)
247{
248	struct timer_group_priv *priv = container_of(handle,
249			struct timer_group_priv, timer[handle->num]);
250
251	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
252}
253EXPORT_SYMBOL(mpic_start_timer);
254
255/**
256 * mpic_stop_timer - stop hardware timer
257 * @handle: the timer to be stopped
258 *
 259 * The timer periodically generates an interrupt until the user stops it.
260 */
261void mpic_stop_timer(struct mpic_timer *handle)
262{
263	struct timer_group_priv *priv = container_of(handle,
264			struct timer_group_priv, timer[handle->num]);
265	struct cascade_priv *casc_priv;
266
267	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
268
269	casc_priv = priv->timer[handle->num].cascade_handle;
270	if (casc_priv) {
271		out_be32(&priv->regs[handle->num].gtccr, 0);
272		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
273	} else {
274		out_be32(&priv->regs[handle->num].gtccr, 0);
275	}
276}
277EXPORT_SYMBOL(mpic_stop_timer);
278
279/**
 280 * mpic_get_remain_time - get remaining timer time
281 * @handle: the timer to be selected.
 282 * @time: location to store the remaining time
283 *
284 * Query timer remaining time.
285 */
286void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
287{
288	struct timer_group_priv *priv = container_of(handle,
289			struct timer_group_priv, timer[handle->num]);
290	struct cascade_priv *casc_priv;
291
292	u64 ticks;
293	u32 tmp_ticks;
294
295	casc_priv = priv->timer[handle->num].cascade_handle;
296	if (casc_priv) {
297		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
298		tmp_ticks &= ~GTCCR_TOG;
299		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
300		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
301		ticks += tmp_ticks;
302	} else {
303		ticks = in_be32(&priv->regs[handle->num].gtccr);
304		ticks &= ~GTCCR_TOG;
305	}
306
307	convert_ticks_to_time(priv, ticks, time);
308}
309EXPORT_SYMBOL(mpic_get_remain_time);
310
311/**
312 * mpic_free_timer - free hardware timer
313 * @handle: the timer to be removed.
314 *
315 * Free the timer.
316 *
 317 * Note: cannot be used in interrupt context.
318 */
319void mpic_free_timer(struct mpic_timer *handle)
320{
321	struct timer_group_priv *priv = container_of(handle,
322			struct timer_group_priv, timer[handle->num]);
323
324	struct cascade_priv *casc_priv;
325	unsigned long flags;
326
327	mpic_stop_timer(handle);
328
329	casc_priv = priv->timer[handle->num].cascade_handle;
330
331	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);
332
333	spin_lock_irqsave(&priv->lock, flags);
334	if (casc_priv) {
335		u32 tcr;
336		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
337					MPIC_TIMER_TCR_ROVR_OFFSET);
338		clrbits32(priv->group_tcr, tcr);
339		priv->idle |= casc_priv->cascade_map;
340		priv->timer[handle->num].cascade_handle = NULL;
341	} else {
342		priv->idle |= TIMER_OFFSET(handle->num);
343	}
344	spin_unlock_irqrestore(&priv->lock, flags);
345}
346EXPORT_SYMBOL(mpic_free_timer);
347
348/**
349 * mpic_request_timer - get a hardware timer
350 * @fn: interrupt handler function
 351 * @dev: data passed to the callback function
 352 * @time: interval after which the timer fires
353 *
 354 * This requests the timer interrupt via request_irq(), returning NULL
 355 * on failure or a timer handle on success.
356 */
357struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
358				      time64_t time)
359{
360	struct mpic_timer *allocated_timer;
361	int ret;
362
363	if (list_empty(&timer_group_list))
364		return NULL;
365
 366	if (time < 0)
367		return NULL;
368
369	allocated_timer = get_timer(time);
370	if (!allocated_timer)
371		return NULL;
372
373	ret = request_irq(allocated_timer->irq, fn,
374			IRQF_TRIGGER_LOW, "global-timer", dev);
375	if (ret) {
376		mpic_free_timer(allocated_timer);
377		return NULL;
378	}
379
380	allocated_timer->dev = dev;
381
382	return allocated_timer;
383}
384EXPORT_SYMBOL(mpic_request_timer);
385
386static int __init timer_group_get_freq(struct device_node *np,
387			struct timer_group_priv *priv)
388{
389	u32 div;
390
391	if (priv->flags & FSL_GLOBAL_TIMER) {
392		struct device_node *dn;
393
394		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
395		if (dn) {
396			of_property_read_u32(dn, "clock-frequency",
397					&priv->timerfreq);
398			of_node_put(dn);
399		}
400	}
401
402	if (priv->timerfreq <= 0)
403		return -EINVAL;
404
405	if (priv->flags & FSL_GLOBAL_TIMER) {
406		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
407		priv->timerfreq /= div;
408	}
409
410	return 0;
411}
412
413static int __init timer_group_get_irq(struct device_node *np,
414		struct timer_group_priv *priv)
415{
416	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
417	const u32 *p;
418	u32 offset;
419	u32 count;
420
421	unsigned int i;
422	unsigned int j;
423	unsigned int irq_index = 0;
424	unsigned int irq;
425	int len;
426
427	p = of_get_property(np, "fsl,available-ranges", &len);
428	if (p && len % (2 * sizeof(u32)) != 0) {
 429		pr_err("%pOF: malformed available-ranges property.\n", np);
430		return -EINVAL;
431	}
432
433	if (!p) {
434		p = all_timer;
435		len = sizeof(all_timer);
436	}
437
438	len /= 2 * sizeof(u32);
439
440	for (i = 0; i < len; i++) {
441		offset = p[i * 2];
442		count = p[i * 2 + 1];
443		for (j = 0; j < count; j++) {
444			irq = irq_of_parse_and_map(np, irq_index);
445			if (!irq) {
 446				pr_err("%pOF: irq parse and map failed.\n", np);
447				return -EINVAL;
448			}
449
450			/* Set timer idle */
451			priv->idle |= TIMER_OFFSET((offset + j));
452			priv->timer[offset + j].irq = irq;
453			priv->timer[offset + j].num = offset + j;
454			irq_index++;
455		}
456	}
457
458	return 0;
459}
460
461static void __init timer_group_init(struct device_node *np)
462{
463	struct timer_group_priv *priv;
464	unsigned int i = 0;
465	int ret;
466
467	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
468	if (!priv) {
 469		pr_err("%pOF: cannot allocate memory for group.\n", np);
470		return;
471	}
472
473	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
474		priv->flags |= FSL_GLOBAL_TIMER;
475
476	priv->regs = of_iomap(np, i++);
477	if (!priv->regs) {
 478		pr_err("%pOF: cannot ioremap timer register address.\n", np);
479		goto out;
480	}
481
482	if (priv->flags & FSL_GLOBAL_TIMER) {
483		priv->group_tcr = of_iomap(np, i++);
484		if (!priv->group_tcr) {
 485			pr_err("%pOF: cannot ioremap tcr address.\n", np);
486			goto out;
487		}
488	}
489
490	ret = timer_group_get_freq(np, priv);
491	if (ret < 0) {
492		pr_err("%pOF: cannot get timer frequency.\n", np);
493		goto out;
494	}
495
496	ret = timer_group_get_irq(np, priv);
497	if (ret < 0) {
498		pr_err("%pOF: cannot get timer irqs.\n", np);
499		goto out;
500	}
501
502	spin_lock_init(&priv->lock);
503
504	/* Init FSL timer hardware */
505	if (priv->flags & FSL_GLOBAL_TIMER)
506		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
507
508	list_add_tail(&priv->node, &timer_group_list);
509
510	return;
511
512out:
513	if (priv->regs)
514		iounmap(priv->regs);
515
516	if (priv->group_tcr)
517		iounmap(priv->group_tcr);
518
519	kfree(priv);
520}
521
522static void mpic_timer_resume(void)
523{
524	struct timer_group_priv *priv;
525
526	list_for_each_entry(priv, &timer_group_list, node) {
527		/* Init FSL timer hardware */
528		if (priv->flags & FSL_GLOBAL_TIMER)
529			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
530	}
531}
532
533static const struct of_device_id mpic_timer_ids[] = {
534	{ .compatible = "fsl,mpic-global-timer", },
535	{},
536};
537
538static struct syscore_ops mpic_timer_syscore_ops = {
539	.resume = mpic_timer_resume,
540};
541
542static int __init mpic_timer_init(void)
543{
544	struct device_node *np = NULL;
545
546	for_each_matching_node(np, mpic_timer_ids)
547		timer_group_init(np);
548
549	register_syscore_ops(&mpic_timer_syscore_ops);
550
551	if (list_empty(&timer_group_list))
552		return -ENODEV;
553
554	return 0;
555}
556subsys_initcall(mpic_timer_init);
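
For reference, here is a minimal usage sketch of the v6.8 API exported above, written against the signatures in this listing. It is not part of mpic_timer.c; the example_* names, the device pointer, and the 30-second interval are assumptions for illustration.

/* Hypothetical consumer of the MPIC timer API (not part of mpic_timer.c). */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/mpic_timer.h>

static struct mpic_timer *example_handle;

static irqreturn_t example_timeout(int irq, void *dev)
{
	/* Called from the hardware interrupt once the interval elapses. */
	return IRQ_HANDLED;
}

static int example_setup(void *dev)
{
	/* Request a timer that fires 30 seconds after it is started. */
	example_handle = mpic_request_timer(example_timeout, dev, 30);
	if (!example_handle)
		return -ENODEV;

	/* Timers are handed out stopped; start counting explicitly. */
	mpic_start_timer(example_handle);
	return 0;
}

static void example_teardown(void)
{
	time64_t remaining;

	mpic_get_remain_time(example_handle, &remaining);
	mpic_stop_timer(example_handle);
	mpic_free_timer(example_handle);	/* must not run in interrupt context */
}

mpic_request_timer() performs the request_irq() itself, so the caller only supplies the handler and its data; mpic_free_timer() releases the interrupt again.
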
v4.10.11 (same driver, older struct timeval based API)
 
  1/*
  2 * MPIC timer driver
  3 *
  4 * Copyright 2013 Freescale Semiconductor, Inc.
  5 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
  6 *	   Li Yang <leoli@freescale.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms of the GNU General Public License as published by the
 10 * Free Software Foundation; either version 2 of the License, or (at your
 11 * option) any later version.
 12 */
 13
 14#include <linux/kernel.h>
 15#include <linux/init.h>
 16#include <linux/module.h>
 17#include <linux/errno.h>
 18#include <linux/mm.h>
 19#include <linux/interrupt.h>
 20#include <linux/slab.h>
 21#include <linux/of.h>
 22#include <linux/of_address.h>
 23#include <linux/of_device.h>
 24#include <linux/of_irq.h>
 25#include <linux/syscore_ops.h>
 26#include <sysdev/fsl_soc.h>
 27#include <asm/io.h>
 28
 29#include <asm/mpic_timer.h>
 30
 31#define FSL_GLOBAL_TIMER		0x1
 32
 33/* Clock Ratio
 34 * Divide by 64 0x00000300
 35 * Divide by 32 0x00000200
 36 * Divide by 16 0x00000100
 37 * Divide by  8 0x00000000 (Hardware default div)
 38 */
 39#define MPIC_TIMER_TCR_CLKDIV		0x00000300
 40
 41#define MPIC_TIMER_TCR_ROVR_OFFSET	24
 42
 43#define TIMER_STOP			0x80000000
 44#define GTCCR_TOG			0x80000000
 45#define TIMERS_PER_GROUP		4
 46#define MAX_TICKS			(~0U >> 1)
 47#define MAX_TICKS_CASCADE		(~0U)
 48#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))
 49
 50/* tv_usec should be less than ONE_SECOND, otherwise use tv_sec */
 51#define ONE_SECOND			1000000
 52
 53struct timer_regs {
 54	u32	gtccr;
 55	u32	res0[3];
 56	u32	gtbcr;
 57	u32	res1[3];
 58	u32	gtvpr;
 59	u32	res2[3];
 60	u32	gtdr;
 61	u32	res3[3];
 62};
 63
 64struct cascade_priv {
 65	u32 tcr_value;			/* TCR register: CASC & ROVR value */
 66	unsigned int cascade_map;	/* cascade map */
 67	unsigned int timer_num;		/* cascade control timer */
 68};
 69
 70struct timer_group_priv {
 71	struct timer_regs __iomem	*regs;
 72	struct mpic_timer		timer[TIMERS_PER_GROUP];
 73	struct list_head		node;
 74	unsigned int			timerfreq;
 75	unsigned int			idle;
 76	unsigned int			flags;
 77	spinlock_t			lock;
 78	void __iomem			*group_tcr;
 79};
 80
 81static struct cascade_priv cascade_timer[] = {
 82	/* cascade timer 0 and 1 */
 83	{0x1, 0xc, 0x1},
 84	/* cascade timer 1 and 2 */
 85	{0x2, 0x6, 0x2},
 86	/* cascade timer 2 and 3 */
 87	{0x4, 0x3, 0x3}
 88};
 89
 90static LIST_HEAD(timer_group_list);
 91
 92static void convert_ticks_to_time(struct timer_group_priv *priv,
 93		const u64 ticks, struct timeval *time)
 94{
 95	u64 tmp_sec;
 96
 97	time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
 98	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
 99
100	time->tv_usec = 0;
101
102	if (tmp_sec <= ticks)
103		time->tv_usec = (__kernel_suseconds_t)
104			div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
105
106	return;
107}
108
109/* the time set by the user is converted to "ticks" */
110static int convert_time_to_ticks(struct timer_group_priv *priv,
111		const struct timeval *time, u64 *ticks)
112{
113	u64 max_value;		/* prevent u64 overflow */
114	u64 tmp = 0;
115
116	u64 tmp_sec;
117	u64 tmp_ms;
118	u64 tmp_us;
119
120	max_value = div_u64(ULLONG_MAX, priv->timerfreq);
121
122	if (time->tv_sec > max_value ||
123			(time->tv_sec == max_value && time->tv_usec > 0))
124		return -EINVAL;
125
126	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
127	tmp += tmp_sec;
128
129	tmp_ms = time->tv_usec / 1000;
130	tmp_ms = div_u64((u64)tmp_ms * (u64)priv->timerfreq, 1000);
131	tmp += tmp_ms;
132
133	tmp_us = time->tv_usec % 1000;
134	tmp_us = div_u64((u64)tmp_us * (u64)priv->timerfreq, 1000000);
135	tmp += tmp_us;
136
137	*ticks = tmp;
138
139	return 0;
140}
141
142/* detect whether there is a cascade timer available */
143static struct mpic_timer *detect_idle_cascade_timer(
144					struct timer_group_priv *priv)
145{
146	struct cascade_priv *casc_priv;
147	unsigned int map;
148	unsigned int array_size = ARRAY_SIZE(cascade_timer);
149	unsigned int num;
150	unsigned int i;
151	unsigned long flags;
152
153	casc_priv = cascade_timer;
154	for (i = 0; i < array_size; i++) {
155		spin_lock_irqsave(&priv->lock, flags);
156		map = casc_priv->cascade_map & priv->idle;
157		if (map == casc_priv->cascade_map) {
158			num = casc_priv->timer_num;
159			priv->timer[num].cascade_handle = casc_priv;
160
161			/* set timer busy */
162			priv->idle &= ~casc_priv->cascade_map;
163			spin_unlock_irqrestore(&priv->lock, flags);
164			return &priv->timer[num];
165		}
166		spin_unlock_irqrestore(&priv->lock, flags);
167		casc_priv++;
168	}
169
170	return NULL;
171}
172
173static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
174		unsigned int num)
175{
176	struct cascade_priv *casc_priv;
177	u32 tcr;
178	u32 tmp_ticks;
179	u32 rem_ticks;
180
181	/* set group tcr reg for cascade */
182	casc_priv = priv->timer[num].cascade_handle;
183	if (!casc_priv)
184		return -EINVAL;
185
186	tcr = casc_priv->tcr_value |
187		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
188	setbits32(priv->group_tcr, tcr);
189
190	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);
191
192	out_be32(&priv->regs[num].gtccr, 0);
193	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);
194
195	out_be32(&priv->regs[num - 1].gtccr, 0);
196	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);
197
198	return 0;
199}
200
201static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
202					u64 ticks)
203{
204	struct mpic_timer *allocated_timer;
205
206	/* Two cascade timers: Support the maximum time */
207	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
208	int ret;
209
210	if (ticks > max_ticks)
211		return NULL;
212
213	/* detect idle timer */
214	allocated_timer = detect_idle_cascade_timer(priv);
215	if (!allocated_timer)
216		return NULL;
217
218	/* set ticks to timer */
219	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
220	if (ret < 0)
221		return NULL;
222
223	return allocated_timer;
224}
225
226static struct mpic_timer *get_timer(const struct timeval *time)
227{
228	struct timer_group_priv *priv;
229	struct mpic_timer *timer;
230
231	u64 ticks;
232	unsigned int num;
233	unsigned int i;
234	unsigned long flags;
235	int ret;
236
237	list_for_each_entry(priv, &timer_group_list, node) {
238		ret = convert_time_to_ticks(priv, time, &ticks);
239		if (ret < 0)
240			return NULL;
241
242		if (ticks > MAX_TICKS) {
243			if (!(priv->flags & FSL_GLOBAL_TIMER))
244				return NULL;
245
246			timer = get_cascade_timer(priv, ticks);
247			if (!timer)
248				continue;
249
250			return timer;
251		}
252
253		for (i = 0; i < TIMERS_PER_GROUP; i++) {
254			/* one timer: Reverse allocation */
255			num = TIMERS_PER_GROUP - 1 - i;
256			spin_lock_irqsave(&priv->lock, flags);
257			if (priv->idle & (1 << i)) {
258				/* set timer busy */
259				priv->idle &= ~(1 << i);
260				/* set ticks & stop timer */
261				out_be32(&priv->regs[num].gtbcr,
262					ticks | TIMER_STOP);
263				out_be32(&priv->regs[num].gtccr, 0);
264				priv->timer[num].cascade_handle = NULL;
265				spin_unlock_irqrestore(&priv->lock, flags);
266				return &priv->timer[num];
267			}
268			spin_unlock_irqrestore(&priv->lock, flags);
269		}
270	}
271
272	return NULL;
273}
274
275/**
276 * mpic_start_timer - start hardware timer
277 * @handle: the timer to be started.
278 *
 279 * It will invoke the ->fn(->dev) callback from the hardware interrupt
 280 * once the requested 'timeval' interval has elapsed.
281 */
282void mpic_start_timer(struct mpic_timer *handle)
283{
284	struct timer_group_priv *priv = container_of(handle,
285			struct timer_group_priv, timer[handle->num]);
286
287	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
288}
289EXPORT_SYMBOL(mpic_start_timer);
290
291/**
292 * mpic_stop_timer - stop hardware timer
 293 * @handle: the timer to be stopped
294 *
 295 * The timer periodically generates an interrupt until the user stops it.
296 */
297void mpic_stop_timer(struct mpic_timer *handle)
298{
299	struct timer_group_priv *priv = container_of(handle,
300			struct timer_group_priv, timer[handle->num]);
301	struct cascade_priv *casc_priv;
302
303	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
304
305	casc_priv = priv->timer[handle->num].cascade_handle;
306	if (casc_priv) {
307		out_be32(&priv->regs[handle->num].gtccr, 0);
308		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
309	} else {
310		out_be32(&priv->regs[handle->num].gtccr, 0);
311	}
312}
313EXPORT_SYMBOL(mpic_stop_timer);
314
315/**
 316 * mpic_get_remain_time - get remaining timer time
317 * @handle: the timer to be selected.
 318 * @time: location to store the remaining time
319 *
320 * Query timer remaining time.
321 */
322void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
323{
324	struct timer_group_priv *priv = container_of(handle,
325			struct timer_group_priv, timer[handle->num]);
326	struct cascade_priv *casc_priv;
327
328	u64 ticks;
329	u32 tmp_ticks;
330
331	casc_priv = priv->timer[handle->num].cascade_handle;
332	if (casc_priv) {
333		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
334		tmp_ticks &= ~GTCCR_TOG;
335		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
336		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
337		ticks += tmp_ticks;
338	} else {
339		ticks = in_be32(&priv->regs[handle->num].gtccr);
340		ticks &= ~GTCCR_TOG;
341	}
342
343	convert_ticks_to_time(priv, ticks, time);
344}
345EXPORT_SYMBOL(mpic_get_remain_time);
346
347/**
348 * mpic_free_timer - free hardware timer
349 * @handle: the timer to be removed.
350 *
351 * Free the timer.
352 *
 353 * Note: cannot be used in interrupt context.
354 */
355void mpic_free_timer(struct mpic_timer *handle)
356{
357	struct timer_group_priv *priv = container_of(handle,
358			struct timer_group_priv, timer[handle->num]);
359
360	struct cascade_priv *casc_priv;
361	unsigned long flags;
362
363	mpic_stop_timer(handle);
364
365	casc_priv = priv->timer[handle->num].cascade_handle;
366
367	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);
368
369	spin_lock_irqsave(&priv->lock, flags);
370	if (casc_priv) {
371		u32 tcr;
372		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
373					MPIC_TIMER_TCR_ROVR_OFFSET);
374		clrbits32(priv->group_tcr, tcr);
375		priv->idle |= casc_priv->cascade_map;
376		priv->timer[handle->num].cascade_handle = NULL;
377	} else {
378		priv->idle |= TIMER_OFFSET(handle->num);
379	}
380	spin_unlock_irqrestore(&priv->lock, flags);
381}
382EXPORT_SYMBOL(mpic_free_timer);
383
384/**
385 * mpic_request_timer - get a hardware timer
386 * @fn: interrupt handler function
 387 * @dev: data passed to the callback function
 388 * @time: interval after which the timer fires
389 *
 390 * This requests the timer interrupt via request_irq(), returning NULL
 391 * on failure or a timer handle on success.
392 */
393struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
394					const struct timeval *time)
395{
396	struct mpic_timer *allocated_timer;
397	int ret;
398
399	if (list_empty(&timer_group_list))
400		return NULL;
401
402	if (!(time->tv_sec + time->tv_usec) ||
403			time->tv_sec < 0 || time->tv_usec < 0)
404		return NULL;
405
406	if (time->tv_usec > ONE_SECOND)
407		return NULL;
408
409	allocated_timer = get_timer(time);
410	if (!allocated_timer)
411		return NULL;
412
413	ret = request_irq(allocated_timer->irq, fn,
414			IRQF_TRIGGER_LOW, "global-timer", dev);
415	if (ret) {
416		mpic_free_timer(allocated_timer);
417		return NULL;
418	}
419
420	allocated_timer->dev = dev;
421
422	return allocated_timer;
423}
424EXPORT_SYMBOL(mpic_request_timer);
425
426static int timer_group_get_freq(struct device_node *np,
427			struct timer_group_priv *priv)
428{
429	u32 div;
430
431	if (priv->flags & FSL_GLOBAL_TIMER) {
432		struct device_node *dn;
433
434		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
435		if (dn) {
436			of_property_read_u32(dn, "clock-frequency",
437					&priv->timerfreq);
438			of_node_put(dn);
439		}
440	}
441
442	if (priv->timerfreq <= 0)
443		return -EINVAL;
444
445	if (priv->flags & FSL_GLOBAL_TIMER) {
446		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
447		priv->timerfreq /= div;
448	}
449
450	return 0;
451}
452
453static int timer_group_get_irq(struct device_node *np,
454		struct timer_group_priv *priv)
455{
456	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
457	const u32 *p;
458	u32 offset;
459	u32 count;
460
461	unsigned int i;
462	unsigned int j;
463	unsigned int irq_index = 0;
464	unsigned int irq;
465	int len;
466
467	p = of_get_property(np, "fsl,available-ranges", &len);
468	if (p && len % (2 * sizeof(u32)) != 0) {
469		pr_err("%s: malformed available-ranges property.\n",
470				np->full_name);
471		return -EINVAL;
472	}
473
474	if (!p) {
475		p = all_timer;
476		len = sizeof(all_timer);
477	}
478
479	len /= 2 * sizeof(u32);
480
481	for (i = 0; i < len; i++) {
482		offset = p[i * 2];
483		count = p[i * 2 + 1];
484		for (j = 0; j < count; j++) {
485			irq = irq_of_parse_and_map(np, irq_index);
486			if (!irq) {
487				pr_err("%s: irq parse and map failed.\n",
488						np->full_name);
489				return -EINVAL;
490			}
491
492			/* Set timer idle */
493			priv->idle |= TIMER_OFFSET((offset + j));
494			priv->timer[offset + j].irq = irq;
495			priv->timer[offset + j].num = offset + j;
496			irq_index++;
497		}
498	}
499
500	return 0;
501}
502
503static void timer_group_init(struct device_node *np)
504{
505	struct timer_group_priv *priv;
506	unsigned int i = 0;
507	int ret;
508
509	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
510	if (!priv) {
511		pr_err("%s: cannot allocate memory for group.\n",
512				np->full_name);
513		return;
514	}
515
516	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
517		priv->flags |= FSL_GLOBAL_TIMER;
518
519	priv->regs = of_iomap(np, i++);
520	if (!priv->regs) {
521		pr_err("%s: cannot ioremap timer register address.\n",
522				np->full_name);
523		goto out;
524	}
525
526	if (priv->flags & FSL_GLOBAL_TIMER) {
527		priv->group_tcr = of_iomap(np, i++);
528		if (!priv->group_tcr) {
529			pr_err("%s: cannot ioremap tcr address.\n",
530					np->full_name);
531			goto out;
532		}
533	}
534
535	ret = timer_group_get_freq(np, priv);
536	if (ret < 0) {
537		pr_err("%s: cannot get timer frequency.\n", np->full_name);
538		goto out;
539	}
540
541	ret = timer_group_get_irq(np, priv);
542	if (ret < 0) {
543		pr_err("%s: cannot get timer irqs.\n", np->full_name);
544		goto out;
545	}
546
547	spin_lock_init(&priv->lock);
548
549	/* Init FSL timer hardware */
550	if (priv->flags & FSL_GLOBAL_TIMER)
551		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
552
553	list_add_tail(&priv->node, &timer_group_list);
554
555	return;
556
557out:
558	if (priv->regs)
559		iounmap(priv->regs);
560
561	if (priv->group_tcr)
562		iounmap(priv->group_tcr);
563
564	kfree(priv);
565}
566
567static void mpic_timer_resume(void)
568{
569	struct timer_group_priv *priv;
570
571	list_for_each_entry(priv, &timer_group_list, node) {
572		/* Init FSL timer hardware */
573		if (priv->flags & FSL_GLOBAL_TIMER)
574			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
575	}
576}
577
578static const struct of_device_id mpic_timer_ids[] = {
579	{ .compatible = "fsl,mpic-global-timer", },
580	{},
581};
582
583static struct syscore_ops mpic_timer_syscore_ops = {
584	.resume = mpic_timer_resume,
585};
586
587static int __init mpic_timer_init(void)
588{
589	struct device_node *np = NULL;
590
591	for_each_matching_node(np, mpic_timer_ids)
592		timer_group_init(np);
593
594	register_syscore_ops(&mpic_timer_syscore_ops);
595
596	if (list_empty(&timer_group_list))
597		return -ENODEV;
598
599	return 0;
600}
601subsys_initcall(mpic_timer_init);
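
For comparison, a minimal sketch of the older timeval-based call shown in the v4.10.11 listing, again with hypothetical example_* names; per the comment in the listing, tv_usec should stay below ONE_SECOND (1000000), with larger durations expressed through tv_sec.

/* Hypothetical consumer of the older timeval API (not part of mpic_timer.c). */
#include <linux/interrupt.h>
#include <linux/time.h>
#include <asm/mpic_timer.h>

static irqreturn_t example_timeout(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static struct mpic_timer *example_request(void *dev)
{
	/* 2.5 seconds: tv_usec stays below ONE_SECOND (1000000). */
	struct timeval interval = { .tv_sec = 2, .tv_usec = 500000 };
	struct mpic_timer *handle;

	handle = mpic_request_timer(example_timeout, dev, &interval);
	if (handle)
		mpic_start_timer(handle);
	return handle;
}

The stop/free lifecycle is unchanged from the newer version; only the representation of the requested interval differs.
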