v4.17
 
/*
 * MPIC timer driver
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>

#include <asm/mpic_timer.h>

#define FSL_GLOBAL_TIMER		0x1

/* Clock Ratio
 * Divide by 64 0x00000300
 * Divide by 32 0x00000200
 * Divide by 16 0x00000100
 * Divide by  8 0x00000000 (Hardware default div)
 */
#define MPIC_TIMER_TCR_CLKDIV		0x00000300

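/*
 * For example, with MPIC_TIMER_TCR_CLKDIV = 0x00000300 the clock-ratio
 * field is 0x300 >> 8 = 3, which timer_group_get_freq() below expands to
 * a divider of (1 << 3) * 8 = 64: the group timers tick at the MPIC
 * "clock-frequency" divided by 64.
 */
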
#define MPIC_TIMER_TCR_ROVR_OFFSET	24

#define TIMER_STOP			0x80000000
#define GTCCR_TOG			0x80000000
#define TIMERS_PER_GROUP		4
#define MAX_TICKS			(~0U >> 1)
#define MAX_TICKS_CASCADE		(~0U)
#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))

struct timer_regs {
	u32	gtccr;
	u32	res0[3];
	u32	gtbcr;
	u32	res1[3];
	u32	gtvpr;
	u32	res2[3];
	u32	gtdr;
	u32	res3[3];
};

struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* cascade map */
	unsigned int timer_num;		/* cascade control timer */
};

struct timer_group_priv {
	struct timer_regs __iomem	*regs;
	struct mpic_timer		timer[TIMERS_PER_GROUP];
	struct list_head		node;
	unsigned int			timerfreq;
	unsigned int			idle;
	unsigned int			flags;
	spinlock_t			lock;
	void __iomem			*group_tcr;
};

static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};

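/*
 * How the bitmaps above fit together: TIMER_OFFSET(num) is the "idle" bit
 * of timer <num>, i.e. 0x8, 0x4, 0x2 and 0x1 for timers 0..3.  Each
 * cascade_map is the union of the two timers it pairs (0xc = timers 0+1,
 * 0x6 = timers 1+2, 0x3 = timers 2+3), and timer_num is the higher-numbered
 * timer of the pair, which acts as the cascade control timer.
 */
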
static LIST_HEAD(timer_group_list);

static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, time64_t *time)
{
	*time = (u64)div_u64(ticks, priv->timerfreq);
}

/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		time64_t time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */

	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time > max_value)
		return -EINVAL;

	*ticks = (u64)time * (u64)priv->timerfreq;

	return 0;
}

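/*
 * Rough numbers, assuming a hypothetical 400 MHz MPIC clock: the timers
 * then run at 400 MHz / 64 = 6.25 MHz, so a 10 second request becomes
 * 62,500,000 ticks and fits a single 31-bit timer (MAX_TICKS), while a
 * 600 second request becomes 3,750,000,000 ticks and must take the
 * cascade path below.
 */
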
/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}

static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}

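/*
 * Illustration of the split above (made-up tick count): for
 * ticks = 10,000,000,000, div_u64_rem(ticks, MAX_TICKS_CASCADE, ...)
 * yields tmp_ticks = 2 for the control timer (loaded stopped) and
 * rem_ticks = 1,410,065,410 for its lower-numbered partner.
 */
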
static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
					u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}

static struct mpic_timer *get_timer(time64_t time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
					ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}

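/*
 * Example of the reverse allocation above: with all four timers idle
 * (priv->idle == 0xf), a single-timer request takes timer 3 first
 * (idle bit 0x1), so the lower-numbered timers stay free and a later
 * long-period request can still cascade timers 0+1 or 1+2.
 */
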
/**
 * mpic_start_timer - start hardware timer
 * @handle: the timer to be started.
 *
 * Once started, the timer raises a hardware interrupt and runs the
 * ->fn(->dev) callback when the requested 'time64_t' interval expires.
 */
void mpic_start_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);

/**
 * mpic_stop_timer - stop hardware timer
 * @handle: the timer to be stopped
 *
 * The timer keeps generating a periodic interrupt until the user stops it.
 */
void mpic_stop_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		out_be32(&priv->regs[handle->num].gtccr, 0);
		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
	} else {
		out_be32(&priv->regs[handle->num].gtccr, 0);
	}
}
EXPORT_SYMBOL(mpic_stop_timer);

/**
 * mpic_get_remain_time - get the remaining time of a timer
 * @handle: the timer to be queried.
 * @time: location where the remaining time is returned
 *
 * Query how much time is left before the timer expires.
 */
void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	u64 ticks;
	u32 tmp_ticks;

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
		tmp_ticks &= ~GTCCR_TOG;
		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
		ticks += tmp_ticks;
	} else {
		ticks = in_be32(&priv->regs[handle->num].gtccr);
		ticks &= ~GTCCR_TOG;
	}

	convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);

/**
 * mpic_free_timer - free hardware timer
 * @handle: the timer to be removed.
 *
 * Free the timer.
 *
 * Note: cannot be used in interrupt context.
 */
void mpic_free_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	struct cascade_priv *casc_priv;
	unsigned long flags;

	mpic_stop_timer(handle);

	casc_priv = priv->timer[handle->num].cascade_handle;

	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (casc_priv) {
		u32 tcr;
		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
					MPIC_TIMER_TCR_ROVR_OFFSET);
		clrbits32(priv->group_tcr, tcr);
		priv->idle |= casc_priv->cascade_map;
		priv->timer[handle->num].cascade_handle = NULL;
	} else {
		priv->idle |= TIMER_OFFSET(handle->num);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);

/**
 * mpic_request_timer - get a hardware timer
 * @fn: interrupt handler function
 * @dev: callback data passed to @fn
 * @time: time for timer
 *
 * This allocates a timer and requests its interrupt via request_irq();
 * it returns a handle on success and NULL on failure.
 */
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
				      time64_t time)
{
	struct mpic_timer *allocated_timer;
	int ret;

	if (list_empty(&timer_group_list))
		return NULL;

	if (time < 0)
		return NULL;

	allocated_timer = get_timer(time);
	if (!allocated_timer)
		return NULL;

	ret = request_irq(allocated_timer->irq, fn,
			IRQF_TRIGGER_LOW, "global-timer", dev);
	if (ret) {
		mpic_free_timer(allocated_timer);
		return NULL;
	}

	allocated_timer->dev = dev;

	return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);

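/*
 * Usage sketch (not part of this driver; the handler name, the my_dev
 * pointer and the 30 second period are illustrative assumptions):
 *
 *	static irqreturn_t my_timeout_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	handle = mpic_request_timer(my_timeout_handler, my_dev, 30);
 *	if (!handle)
 *		return -ENODEV;
 *	mpic_start_timer(handle);
 *
 *	mpic_get_remain_time(handle, &remaining);
 *	mpic_stop_timer(handle);
 *	mpic_free_timer(handle);	(not from interrupt context)
 */
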
static int timer_group_get_freq(struct device_node *np,
			struct timer_group_priv *priv)
{
	u32 div;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		struct device_node *dn;

		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
		if (dn) {
			of_property_read_u32(dn, "clock-frequency",
					&priv->timerfreq);
			of_node_put(dn);
		}
	}

	if (priv->timerfreq <= 0)
		return -EINVAL;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
		priv->timerfreq /= div;
	}

	return 0;
}

static int timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

	p = of_get_property(np, "fsl,available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}

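/*
 * Hypothetical example for "fsl,available-ranges": a property value of
 * <1 3> describes one range starting at timer 1 and spanning 3 timers, so
 * the loop above marks timers 1..3 idle and maps device-tree interrupt
 * indexes 0..2 onto them.  Without the property, all four timers of the
 * group are used.
 */
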
static void timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}

static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}

static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};

static int __init mpic_timer_init(void)
{
	struct device_node *np = NULL;

	for_each_matching_node(np, mpic_timer_ids)
		timer_group_init(np);

	register_syscore_ops(&mpic_timer_syscore_ops);

	if (list_empty(&timer_group_list))
		return -ENODEV;

	return 0;
}
subsys_initcall(mpic_timer_init);