/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

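/*
 * Each channel is described by platform data (struct sh_timer_config).
 * As a rough sketch, a board file might wire up one channel like this
 * (the field values below are purely illustrative):
 *
 *	static struct sh_timer_config tmu0_platform_data = {
 *		.channel_offset = 0x04,   // mapbase to TSTR distance
 *		.timer_bit = 0,           // channel's start bit in TSTR
 *		.clockevent_rating = 200, // nonzero: clock event device
 *	};
 */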
struct sh_tmu_priv {
	void __iomem *mapbase;
	struct clk *clk;
	struct irqaction irqaction;
	struct platform_device *pdev;
	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

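/*
 * Channel registers live at mapbase + reg_nr * 4; TCR is 16 bits wide,
 * TCOR and TCNT are 32 bits wide.  The start/stop register TSTR is
 * shared by all channels of a TMU block and sits below the channel
 * base, at mapbase - cfg->channel_offset.
 */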
static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(base - cfg->channel_offset);

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(base + offs);
	else
		return ioread32(base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
				unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, base - cfg->channel_offset);
		return;
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, base + offs);
	else
		iowrite32(value, base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(p, TSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_tmu_write(p, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_priv *p)
{
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(p, 0);

	/* maximum timeout */
	sh_tmu_write(p, TCOR, 0xffffffff);
	sh_tmu_write(p, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	p->rate = clk_get_rate(p->clk) / 4;
	sh_tmu_write(p, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(p, 1);

	return 0;
}

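/*
 * Hardware enable/disable is reference counted so the clocksource
 * suspend/resume callbacks can power the channel down and back up
 * behind the clocksource core's back without losing the logical
 * enabled state.  Only the 0 -> 1 transition touches the hardware and
 * takes a runtime PM reference.
 */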
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
	if (p->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	return __sh_tmu_enable(p);
}

static void __sh_tmu_disable(struct sh_tmu_priv *p)
{
	/* disable channel */
	sh_tmu_start_stop_ch(p, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(p, TCR, 0x0000);

	/* stop clock */
	clk_disable(p->clk);
}

static void sh_tmu_disable(struct sh_tmu_priv *p)
{
	if (WARN_ON(p->enable_count == 0))
		return;

	if (--p->enable_count > 0)
		return;

	__sh_tmu_disable(p);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}

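/*
 * TMU channels count down: TCNT is loaded with the delta and raises an
 * underflow interrupt when it wraps.  TCR value 0x0020 sets UNIE
 * (underflow interrupt enable) and leaves the prescaler at peripheral
 * clock / 4; writing TCR with the underflow flag bit cleared also acks
 * a pending interrupt.  In one-shot mode TCOR is parked at the maximum
 * value so an expired timer reloads far into the future instead of
 * re-firing immediately.
 */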
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(p, 0);

	/* acknowledge interrupt */
	sh_tmu_read(p, TCR);

	/* enable interrupt */
	sh_tmu_write(p, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(p, TCOR, delta);
	else
		sh_tmu_write(p, TCOR, 0xffffffff);

	sh_tmu_write(p, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(p, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_priv *p = dev_id;

	/* disable or acknowledge interrupt */
	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(p, TCR, 0x0000);
	else
		sh_tmu_write(p, TCR, 0x0020);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_priv, cs);
}

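/*
 * The clocksource core expects an up-counting value, so the hardware
 * down-counter is inverted by XORing it with 0xffffffff.
 */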
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(p->cs_enabled))
		return 0;

	ret = sh_tmu_enable(p);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, p->rate);
		p->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (WARN_ON(!p->cs_enabled))
		return;

	sh_tmu_disable(p);
	p->cs_enabled = false;
}

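/*
 * These callbacks run from timekeeping syscore context late in system
 * suspend/early in resume, where runtime PM is unavailable, so they
 * adjust enable_count directly and use the genpd syscore helpers to
 * power the channel's PM domain off and back on.
 */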
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (--p->enable_count == 0) {
		__sh_tmu_disable(p);
		pm_genpd_syscore_poweroff(&p->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (p->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&p->pdev->dev);
		__sh_tmu_enable(p);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_priv, ced);
}

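/*
 * For periodic operation the reload value is the channel rate divided
 * by HZ, rounded to the nearest count: e.g. a 33 MHz peripheral clock
 * gives a rate of 8.25 MHz, so HZ=100 yields a period of 82500 counts.
 */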
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_tmu_enable(p);

	clockevents_config(ced, p->rate);

	if (periodic) {
		p->periodic = (p->rate + HZ/2) / HZ;
		sh_tmu_set_next(p, p->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_tmu_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_tmu_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(p, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
}

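/*
 * The device is registered with a dummy 1 Hz frequency and
 * re-configured with the real rate in sh_tmu_clock_event_start().
 * The minimum delta of 0x300 counts presumably leaves headroom for the
 * register writes in sh_tmu_set_next() to complete before the counter
 * underflows.
 */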
static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
}

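/*
 * A nonzero clockevent_rating takes precedence: each channel becomes
 * either a clock event device or a clocksource, never both.
 */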
static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_tmu_register_clockevent(p, name, clockevent_rating);
	else if (clocksource_rating)
		sh_tmu_register_clocksource(p, name, clocksource_rating);

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err2;

	p->cs_enabled = false;
	p->enable_count = 0;

	ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret < 0)
		goto err3;

	return 0;

 err3:
	clk_unprepare(p->clk);
 err2:
	clk_put(p->clk);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}

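/*
 * The driver doubles as an early platform driver ("earlytimer" class).
 * When the regular probe later runs for a device that was already set
 * up as an earlytimer, the existing state is kept and only the runtime
 * PM bookkeeping is brought up to date.
 */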
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	}
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");