// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell Armada 370/XP SoC timer handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * Timer 0 is used as free-running clocksource, while the per-CPU
 * (local) timer 0 is used as clock_event_device.
 *
 * ---
 * Clocksource driver for Armada 370 and Armada XP SoC.
 * This driver implements one compatible string for each SoC, given
 * each has its own characteristics:
 *
 *   * Armada 370 has no 25 MHz fixed timer.
 *
 *   * Armada XP cannot work properly without such 25 MHz fixed timer as
 *     doing otherwise leads to using a clocksource whose frequency varies
 *     when doing cpufreq frequency changes.
 *
 * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/percpu.h>
#include <linux/syscore_ops.h>

#include <asm/delay.h>

/*
 * Timer block registers.
 */
#define TIMER_CTRL_OFF		0x0000
#define TIMER0_EN		BIT(0)
#define TIMER0_RELOAD_EN	BIT(1)
#define TIMER0_25MHZ		BIT(11)
#define TIMER0_DIV(div)		((div) << 19)
#define TIMER1_EN		BIT(2)
#define TIMER1_RELOAD_EN	BIT(3)
#define TIMER1_25MHZ		BIT(12)
#define TIMER1_DIV(div)		((div) << 22)
#define TIMER_EVENTS_STATUS	0x0004
#define TIMER0_CLR_MASK		(~0x1)
#define TIMER1_CLR_MASK		(~0x100)
#define TIMER0_RELOAD_OFF	0x0010
#define TIMER0_VAL_OFF		0x0014
#define TIMER1_RELOAD_OFF	0x0018
#define TIMER1_VAL_OFF		0x001c

#define LCL_TIMER_EVENTS_STATUS	0x0028
/* Global timers are connected to the coherency fabric clock, and the
   below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT	5
#define TIMER_DIVIDER		(1 << TIMER_DIVIDER_SHIFT)

/*
 * SoC-specific data.
 */
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
static u32 enable_mask;

/*
 * Number of timer ticks per jiffy.
 */
static u32 ticks_per_jiffy;

static struct clock_event_device __percpu *armada_370_xp_evt;

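/*
 * Read-modify-write helper for the per-CPU (local) timer control
 * register.
 */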
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
		local_base + TIMER_CTRL_OFF);
}

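/*
 * Timer 0 counts down from 0xffffffff, so return its complement to get
 * an up-counting value for sched_clock().
 */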
static u64 notrace armada_370_xp_read_sched_clock(void)
{
	return ~readl(timer_base + TIMER0_VAL_OFF);
}

/*
 * Clockevent handling.
 */
static int
armada_370_xp_clkevt_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	/*
	 * Clear clockevent timer interrupt.
	 */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);

	/*
	 * Setup new clockevent timer value.
	 */
	writel(delta, local_base + TIMER0_VAL_OFF);

	/*
	 * Enable the timer.
	 */
	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
	return 0;
}

static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
{
	/*
	 * Disable timer.
	 */
	local_timer_ctrl_clrset(TIMER0_EN, 0);

	/*
	 * ACK pending timer interrupt.
	 */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	return 0;
}

static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
{
	/*
	 * Setup timer to fire at 1/HZ intervals.
	 */
	writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
	writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);

	/*
	 * Enable timer.
	 */
	local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
	return 0;
}

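/*
 * IRQ of the per-CPU clockevent timer, parsed from the devicetree in
 * armada_370_xp_timer_common_init().
 */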
static int armada_370_xp_clkevt_irq;

static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
	/*
	 * ACK timer interrupt and call event handler.
	 */
	struct clock_event_device *evt = dev_id;

	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
	u32 clr = 0, set = 0;

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;
	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
	evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
	evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
	evt->tick_resume = armada_370_xp_clkevt_shutdown;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}

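/*
 * Shut down the local clockevent when a CPU is taken offline.
 */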
static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);

	evt->set_state_shutdown(evt);
	disable_percpu_irq(evt->irq);
	return 0;
}

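/*
 * Timer control register contents saved across system suspend/resume.
 */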
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;

static int armada_370_xp_timer_suspend(void)
{
	timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
	timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
	return 0;
}

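/*
 * Re-arm the free-running clocksource timer and restore the saved
 * control registers on resume.
 */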
static void armada_370_xp_timer_resume(void)
{
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
	writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
	writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}

static struct syscore_ops armada_370_xp_timer_syscore_ops = {
	.suspend	= armada_370_xp_timer_suspend,
	.resume		= armada_370_xp_timer_resume,
};

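/*
 * The delay timer reads the same down-counting timer 0 used as
 * clocksource; the value is complemented so it appears to count up.
 */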
static unsigned long armada_370_delay_timer_read(void)
{
	return ~readl(timer_base + TIMER0_VAL_OFF);
}

static struct delay_timer armada_370_delay_timer = {
	.read_current_timer = armada_370_delay_timer_read,
};

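/*
 * Common initialization: map the global and per-CPU register banks,
 * select the clock input and divider, register the free-running
 * clocksource, sched_clock and delay timer, and set up the per-CPU
 * clockevents through a CPU hotplug state.
 */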
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
			 TIMER0_RELOAD_EN | enable_mask,
			 TIMER0_RELOAD_EN | enable_mask);

	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32, clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio\n");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);
	/* Immediately configure the timer on the boot CPU */
	if (res) {
		pr_err("Failed to request percpu irq\n");
		return res;
	}

	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer\n");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}

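/*
 * Armada XP: the clocksource must run from the fixed 25 MHz clock,
 * otherwise its frequency would vary with cpufreq changes (see the
 * comment at the top of this file).
 */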
static int __init armada_xp_timer_init(struct device_node *np)
{
	struct clk *clk = of_clk_get_by_name(np, "fixed");
	int ret;

	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	timer_clk = clk_get_rate(clk);

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
		 armada_xp_timer_init);

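/*
 * Armada 375: use the fixed 25 MHz clock when the devicetree provides
 * it, otherwise fall back to the first clock divided by TIMER_DIVIDER.
 */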
static int __init armada_375_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "fixed");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		timer_clk = clk_get_rate(clk);
	} else {

		/*
		 * This fallback is required in order to retain proper
		 * devicetree backwards compatibility.
		 */
		clk = of_clk_get(np, 0);

		/* Must have at least a clock */
		if (IS_ERR(clk)) {
			pr_err("Failed to get clock\n");
			return PTR_ERR(clk);
		}

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer",
		 armada_375_timer_init);

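/*
 * Armada 370: no fixed 25 MHz timer clock, so run from the SoC clock
 * divided by TIMER_DIVIDER.
 */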
static int __init armada_370_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
	timer25Mhz = false;

	return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer",
		 armada_370_timer_init);