/*
 * Marvell Armada 370/XP SoC timer handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * Global timer 0 is used as the free-running clocksource, while the
 * per-CPU local timer 0 is used as the clock_event_device.
 *
 * ---
 * Clocksource driver for the Armada 370 and Armada XP SoCs.
 * This driver implements one compatible string for each SoC, since
 * each has its own characteristics:
 *
 *  * Armada 370 has no 25 MHz fixed timer.
 *
 *  * Armada XP cannot work properly without such a 25 MHz fixed timer,
 *    as using any other clock input leads to a clocksource whose
 *    frequency varies with cpufreq frequency changes.
 *
 * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/percpu.h>
#include <linux/syscore_ops.h>

#include <asm/delay.h>

/*
 * Timer block registers.
 */
#define TIMER_CTRL_OFF		0x0000
#define TIMER0_EN		BIT(0)
#define TIMER0_RELOAD_EN	BIT(1)
#define TIMER0_25MHZ		BIT(11)
#define TIMER0_DIV(div)		((div) << 19)
#define TIMER1_EN		BIT(2)
#define TIMER1_RELOAD_EN	BIT(3)
#define TIMER1_25MHZ		BIT(12)
#define TIMER1_DIV(div)		((div) << 22)
#define TIMER_EVENTS_STATUS	0x0004
#define TIMER0_CLR_MASK		(~0x1)
#define TIMER1_CLR_MASK		(~0x100)
#define TIMER0_RELOAD_OFF	0x0010
#define TIMER0_VAL_OFF		0x0014
#define TIMER1_RELOAD_OFF	0x0018
#define TIMER1_VAL_OFF		0x001c

#define LCL_TIMER_EVENTS_STATUS	0x0028
/* Global timers are connected to the coherency fabric clock, and the
   below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT	5
#define TIMER_DIVIDER		(1 << TIMER_DIVIDER_SHIFT)

/*
 * SoC-specific data.
 */
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
static u32 enable_mask;

/*
 * Number of timer ticks per jiffy.
 */
static u32 ticks_per_jiffy;

static struct clock_event_device __percpu *armada_370_xp_evt;

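/*
 * Read-modify-write helper for the per-CPU (local) timer control
 * register: clears the bits in @clr, then sets the bits in @set.
 */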
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
	       local_base + TIMER_CTRL_OFF);
}

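/*
 * sched_clock() read hook: global timer 0 counts down, so invert the
 * value to return a monotonically increasing 32-bit count.
 */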
static u64 notrace armada_370_xp_read_sched_clock(void)
{
	return ~readl(timer_base + TIMER0_VAL_OFF);
}

/*
 * Clockevent handling.
 */
static int
armada_370_xp_clkevt_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	/*
	 * Clear clockevent timer interrupt.
	 */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);

	/*
	 * Setup new clockevent timer value.
	 */
	writel(delta, local_base + TIMER0_VAL_OFF);

	/*
	 * Enable the timer.
	 */
	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
	return 0;
}

static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
{
	/*
	 * Disable timer.
	 */
	local_timer_ctrl_clrset(TIMER0_EN, 0);

	/*
	 * ACK pending timer interrupt.
	 */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	return 0;
}

static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
{
	/*
	 * Setup timer to fire at 1/HZ intervals.
	 */
	writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
	writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);

	/*
	 * Enable timer.
	 */
	local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
	return 0;
}

static int armada_370_xp_clkevt_irq;

static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
	/*
	 * ACK timer interrupt and call event handler.
	 */
	struct clock_event_device *evt = dev_id;

	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
	u32 clr = 0, set = 0;

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;
	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
	evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
	evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
	evt->tick_resume = armada_370_xp_clkevt_shutdown;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}

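/*
 * Shut the local clockevent down and disable its per-CPU interrupt
 * when a CPU goes offline.
 */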
static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);

	evt->set_state_shutdown(evt);
	disable_percpu_irq(evt->irq);
	return 0;
}

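/*
 * Control register contents saved on suspend, so that both the global
 * and the per-CPU timer configuration can be restored on resume.
 */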
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;

static int armada_370_xp_timer_suspend(void)
{
	timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
	timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
	return 0;
}

static void armada_370_xp_timer_resume(void)
{
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
	writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
	writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}

static struct syscore_ops armada_370_xp_timer_syscore_ops = {
	.suspend	= armada_370_xp_timer_suspend,
	.resume		= armada_370_xp_timer_resume,
};

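/*
 * read_current_timer() hook for the ARM delay loop: the hardware
 * counter counts down, so invert it to present an up-counting value.
 */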
static unsigned long armada_370_delay_timer_read(void)
{
	return ~readl(timer_base + TIMER0_VAL_OFF);
}

static struct delay_timer armada_370_delay_timer = {
	.read_current_timer = armada_370_delay_timer_read,
};

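/*
 * Common initialization shared by the Armada 370, 375 and XP entry
 * points below. Callers are expected to have set up timer_clk (and
 * timer25Mhz, where applicable) before calling this.
 */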
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private (local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
			 TIMER0_RELOAD_EN | enable_mask,
			 TIMER0_RELOAD_EN | enable_mask);

	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32,
				    clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio\n");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);
	if (res) {
		pr_err("Failed to request percpu irq\n");
		return res;
	}

	/* Immediately configure the timer on the boot CPU */
	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer\n");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}

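/*
 * Armada XP: use the 25 MHz "fixed" clock reference, so the
 * clocksource rate does not vary with cpufreq frequency changes.
 */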
static int __init armada_xp_timer_init(struct device_node *np)
{
	struct clk *clk = of_clk_get_by_name(np, "fixed");
	int ret;

	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	timer_clk = clk_get_rate(clk);

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
		 armada_xp_timer_init);

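/*
 * Armada 375: prefer the 25 MHz "fixed" clock; fall back to the first
 * clock, divided by TIMER_DIVIDER, to keep older device trees working.
 */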
static int __init armada_375_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "fixed");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		timer_clk = clk_get_rate(clk);
	} else {

		/*
		 * This fallback is required in order to retain proper
		 * devicetree backwards compatibility.
		 */
		clk = of_clk_get(np, 0);

		/* Must have at least a clock */
		if (IS_ERR(clk)) {
			pr_err("Failed to get clock\n");
			return PTR_ERR(clk);
		}

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer",
		 armada_375_timer_init);

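/*
 * Armada 370: no 25 MHz fixed timer is available, so run from the SoC
 * clock divided by TIMER_DIVIDER.
 */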
static int __init armada_370_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
	timer25Mhz = false;

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer",
		 armada_370_timer_init);