// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
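/*
 * Timer driver for the HPE GXP SoC. The free-running timestamp counter is
 * registered as a clocksource and as sched_clock, TIMER0 is registered as a
 * one-shot clockevent, and a "gxp-wdt" watchdog child device is created at
 * probe time because the watchdog shares this register block.
 */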

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>

#define TIMER0_FREQ	1000000

/* Register offsets within the timer block */
#define GXP_TIMER_CNT_OFS	0x00
#define GXP_TIMESTAMP_OFS	0x08
#define GXP_TIMER_CTRL_OFS	0x14

/*
 * TCS stands for Timer Control/Status: these are masks for bits in the
 * timer control/status register.
 */
#define MASK_TCS_ENABLE	0x01
#define MASK_TCS_PERIOD	0x02
#define MASK_TCS_RELOAD	0x04
#define MASK_TCS_TC	0x80

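/**
 * struct gxp_timer - per-instance state for the GXP timer
 * @counter: MMIO address of the timer count register
 * @control: MMIO address of the timer control/status register
 * @evt: clockevent device backed by this timer
 */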
struct gxp_timer {
	void __iomem *counter;
	void __iomem *control;
	struct clock_event_device evt;
};

static struct gxp_timer *gxp_timer;

/* Free-running timestamp counter, used for the clocksource and sched_clock */
static void __iomem *system_clock __ro_after_init;

static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
{
	return container_of(evt_dev, struct gxp_timer, evt);
}

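/* sched_clock read hook: return the raw 32-bit timestamp counter value */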
static u64 notrace gxp_sched_read(void)
{
	return readl_relaxed(system_clock);
}

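/* Program the counter for the next one-shot clockevent expiry */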
static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
{
	struct gxp_timer *timer = to_gxp_timer(evt_dev);

	/* Stop counting and disable interrupt before updating */
	writeb_relaxed(MASK_TCS_TC, timer->control);
	writel_relaxed(event, timer->counter);
	writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);

	return 0;
}

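/*
 * The interrupt line is shared with the watchdog (IRQF_SHARED), so report
 * IRQ_NONE unless the terminal-count (TC) status bit is set; otherwise
 * acknowledge it by writing the TC bit and run the clockevent handler.
 */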
static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
{
	struct gxp_timer *timer = (struct gxp_timer *)dev_id;

	if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
		return IRQ_NONE;

	writeb_relaxed(MASK_TCS_TC, timer->control);

	timer->evt.event_handler(&timer->evt);

	return IRQ_HANDLED;
}

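/*
 * Early setup, run via TIMER_OF_DECLARE(): map the registers, register the
 * timestamp counter as an MMIO clocksource and as sched_clock, and register
 * TIMER0 as a one-shot clockevent on the shared timer interrupt.
 */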
static int __init gxp_timer_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk;
	u32 freq;
	int ret, irq;

	gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
	if (!gxp_timer) {
		ret = -ENOMEM;
		pr_err("Can't allocate gxp_timer\n");
		return ret;
	}

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		ret = (int)PTR_ERR(clk);
		pr_err("%pOFn clock not found: %d\n", node, ret);
		goto err_free;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("%pOFn clock enable failed: %d\n", node, ret);
		goto err_clk_enable;
	}

	base = of_iomap(node, 0);
	if (!base) {
		ret = -ENXIO;
		pr_err("Can't map timer base registers\n");
		goto err_iomap;
	}

	/* Resolve the counter, control and timestamp registers from the base */
	gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
	gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
	system_clock = base + GXP_TIMESTAMP_OFS;

	gxp_timer->evt.name = node->name;
	gxp_timer->evt.rating = 300;
	gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
	gxp_timer->evt.set_next_event = gxp_time_set_next_event;
	gxp_timer->evt.cpumask = cpumask_of(0);

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		pr_err("GXP Timer Can't parse IRQ %d\n", irq);
		goto err_exit;
	}

	freq = clk_get_rate(clk);

	/* The timestamp counter is the clocksource; it runs at the clock rate */
	ret = clocksource_mmio_init(system_clock, node->name, freq,
				    300, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%pOFn init clocksource failed: %d\n", node, ret);
		goto err_exit;
	}

	sched_clock_register(gxp_sched_read, 32, freq);

	/* The clockevent is registered at the fixed TIMER0_FREQ rate (1 MHz) */
	clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
					0xf, 0xffffffff);

	ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
			  node->name, gxp_timer);
	if (ret) {
		pr_err("%pOFn request_irq() failed: %d\n", node, ret);
		goto err_exit;
	}

	pr_debug("gxp: system timer (irq = %d)\n", irq);
	return 0;

err_exit:
	iounmap(base);
err_iomap:
	clk_disable_unprepare(clk);
err_clk_enable:
	clk_put(clk);
err_free:
	kfree(gxp_timer);
	return ret;
}

/*
 * This probe runs after the timer is already up and running. It creates the
 * watchdog device as a child platform device, since the watchdog shares this
 * timer's register block.
 */
static int gxp_timer_probe(struct platform_device *pdev)
{
	struct platform_device *gxp_watchdog_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!gxp_timer) {
		pr_err("GXP timer not initialized, cannot create watchdog\n");
		return -ENOMEM;
	}

	gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
	if (!gxp_watchdog_device) {
		pr_err("Timer failed to allocate gxp-wdt\n");
		return -ENOMEM;
	}

	/* Pass only the counter base address to the watchdog as platform data */
	gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
	gxp_watchdog_device->dev.parent = dev;

	ret = platform_device_add(gxp_watchdog_device);
	if (ret)
		platform_device_put(gxp_watchdog_device);

	return ret;
}

static const struct of_device_id gxp_timer_of_match[] = {
	{ .compatible = "hpe,gxp-timer", },
	{},
};

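/*
 * The platform driver only exists to spawn the watchdog child device;
 * manual bind/unbind through sysfs is suppressed.
 */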
static struct platform_driver gxp_timer_driver = {
	.probe = gxp_timer_probe,
	.driver = {
		.name = "gxp-timer",
		.of_match_table = gxp_timer_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(gxp_timer_driver);

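/* Hook gxp_timer_init() to the "hpe,gxp-timer" compatible for early boot */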
TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);