v4.10.11
/*
 * SuperH Timer Support - TMU
 *
 *  Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

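/*
 * TCR bits: UNF is the underflow status flag, UNIE enables the underflow
 * interrupt, and the TPSC field selects the input clock prescaler.
 */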
#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

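	/* Channel registers are spaced 4 bytes apart; TCR alone is 16 bits wide. */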
	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

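/*
 * Channel enables are refcounted: the clocksource suspend/resume hooks
 * adjust enable_count directly so they can stop and restart the hardware
 * without unbalancing the runtime PM reference taken here.
 */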
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

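	/* TCNT counts down; invert it so the clocksource value counts up. */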
	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_update_freq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
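		/* Round to the nearest whole number of counter ticks per jiffy. */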
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

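	/* Dummy 1 Hz rate and 0x300-tick minimum delta; the real rate is set in sh_tmu_clock_event_start(). */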
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

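	/*
	 * Per-channel register blocks are 12 bytes each; they follow the
	 * shared registers, whose size differs between SH-3 style parts
	 * and the other TMU variants.
	 */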
	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

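/*
 * For reference, a minimal sketch of a device tree node this driver could
 * bind to. The unit address, reg, interrupt and clock specifiers below are
 * hypothetical; only "renesas,tmu", the "fck" clock name and the
 * "#renesas,channels" property come from the driver code itself:
 *
 *	tmu0: timer@ffd80000 {
 *		compatible = "renesas,tmu";
 *		reg = <0xffd80000 0x30>;
 *		interrupts = <0 198 4>, <0 199 4>, <0 200 4>;
 *		clocks = <&tmu_clk>;
 *		clock-names = "fck";
 *		#renesas,channels = <3>;
 *	};
 */
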
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

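	/* Skip runtime PM setup when probed early, before the driver core is ready. */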
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");
v3.15
/*
 * SuperH Timer Support - TMU
 *
 *  Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_tmu_priv {
	void __iomem *mapbase;
	struct clk *clk;
	struct irqaction irqaction;
	struct platform_device *pdev;
	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

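	/*
	 * mapbase points at this channel's own registers; the shared TSTR
	 * register sits channel_offset bytes below it.
	 */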
	if (reg_nr == TSTR)
		return ioread8(base - cfg->channel_offset);

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(base + offs);
	else
		return ioread32(base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
				unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, base - cfg->channel_offset);
		return;
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, base + offs);
	else
		iowrite32(value, base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(p, TSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_tmu_write(p, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_priv *p)
{
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(p, 0);

	/* maximum timeout */
	sh_tmu_write(p, TCOR, 0xffffffff);
	sh_tmu_write(p, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	p->rate = clk_get_rate(p->clk) / 4;
	sh_tmu_write(p, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(p, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_priv *p)
{
	if (p->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	return __sh_tmu_enable(p);
}

static void __sh_tmu_disable(struct sh_tmu_priv *p)
{
	/* disable channel */
	sh_tmu_start_stop_ch(p, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(p, TCR, 0x0000);

	/* stop clock */
	clk_disable(p->clk);
}

static void sh_tmu_disable(struct sh_tmu_priv *p)
{
	if (WARN_ON(p->enable_count == 0))
		return;

	if (--p->enable_count > 0)
		return;

	__sh_tmu_disable(p);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(p, 0);

	/* acknowledge interrupt */
	sh_tmu_read(p, TCR);

	/* enable interrupt */
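	/* 0x0020 = UNIE (underflow interrupt enable); prescaler left at clk/4 */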
	sh_tmu_write(p, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(p, TCOR, delta);
	else
		sh_tmu_write(p, TCOR, 0xffffffff);

	sh_tmu_write(p, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(p, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_priv *p = dev_id;

	/* disable or acknowledge interrupt */
	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(p, TCR, 0x0000);
	else
		sh_tmu_write(p, TCR, 0x0020);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_priv, cs);
}

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

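	/* TCNT counts down; invert it so the returned cycle count increases. */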
	return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(p->cs_enabled))
		return 0;

	ret = sh_tmu_enable(p);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, p->rate);
		p->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (WARN_ON(!p->cs_enabled))
		return;

	sh_tmu_disable(p);
	p->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (--p->enable_count == 0) {
		__sh_tmu_disable(p);
		pm_genpd_syscore_poweroff(&p->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (p->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&p->pdev->dev);
		__sh_tmu_enable(p);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_priv, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_tmu_enable(p);

	clockevents_config(ced, p->rate);

	if (periodic) {
		p->periodic = (p->rate + HZ/2) / HZ;
		sh_tmu_set_next(p, p->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

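	/* For UNUSED, the channel still needs stopping if the old mode did not disable it. */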
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_tmu_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_tmu_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(p, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
		    unsigned long clockevent_rating,
		    unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_tmu_register_clockevent(p, name, clockevent_rating);
	else if (clocksource_rating)
		sh_tmu_register_clocksource(p, name, clocksource_rating);

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err2;

	p->cs_enabled = false;
	p->enable_count = 0;

	ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret < 0)
		goto err3;

	return 0;

 err3:
	clk_unprepare(p->clk);
 err2:
	clk_put(p->clk);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	}
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");