Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2017 SiFive
  5 *
  6 * All RISC-V systems have a timer attached to every hart.  These timers can
  7 * either be read from the "time" and "timeh" CSRs, and can use the SBI to
  8 * setup events, or directly accessed using MMIO registers.
  9 */
 10
 11#define pr_fmt(fmt) "riscv-timer: " fmt
 12
 13#include <linux/acpi.h>
 14#include <linux/clocksource.h>
 15#include <linux/clockchips.h>
 16#include <linux/cpu.h>
 17#include <linux/delay.h>
 18#include <linux/irq.h>
 19#include <linux/irqdomain.h>
 20#include <linux/module.h>
 21#include <linux/sched_clock.h>
 22#include <linux/io-64-nonatomic-lo-hi.h>
 23#include <linux/interrupt.h>
 24#include <linux/of_irq.h>
 25#include <linux/limits.h>
 26#include <clocksource/timer-riscv.h>
 27#include <asm/smp.h>
 28#include <asm/cpufeature.h>
 29#include <asm/sbi.h>
 30#include <asm/timex.h>
 31
/* Enabled once at init when the Sstc extension is detected; selects the
 * CSR-based (stimecmp) programming path over SBI timer calls. */
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
/* Set from DT ("riscv,timer-cannot-wake-cpu") or the ACPI RHCT flag;
 * adds CLOCK_EVT_FEAT_C3STOP to the per-CPU event device when true. */
static bool riscv_timer_cannot_wake_cpu;
 34
 35static void riscv_clock_event_stop(void)
 36{
 37	if (static_branch_likely(&riscv_sstc_available)) {
 38		csr_write(CSR_STIMECMP, ULONG_MAX);
 39		if (IS_ENABLED(CONFIG_32BIT))
 40			csr_write(CSR_STIMECMPH, ULONG_MAX);
 41	} else {
 42		sbi_set_timer(U64_MAX);
 43	}
 44}
 45
 46static int riscv_clock_next_event(unsigned long delta,
 47		struct clock_event_device *ce)
 48{
 49	u64 next_tval = get_cycles64() + delta;
 50
 
 51	if (static_branch_likely(&riscv_sstc_available)) {
 52#if defined(CONFIG_32BIT)
 53		csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
 54		csr_write(CSR_STIMECMPH, next_tval >> 32);
 55#else
 56		csr_write(CSR_STIMECMP, next_tval);
 57#endif
 58	} else
 59		sbi_set_timer(next_tval);
 60
 61	return 0;
 62}
 63
/* clockevents ->set_state_shutdown callback: quiesce this hart's timer. */
static int riscv_clock_shutdown(struct clock_event_device *evt)
{
	riscv_clock_event_stop();
	return 0;
}
 69
/* Linux IRQ number mapped from RV_IRQ_TIMER; the same mapping is used by
 * every hart (enabled per-CPU in riscv_timer_starting_cpu()). */
static unsigned int riscv_clock_event_irq;
/* Per-hart oneshot event device; rating is raised to 450 at CPU bring-up
 * when sstc is available. */
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name			= "riscv_timer_clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 100,
	.set_next_event		= riscv_clock_next_event,
	.set_state_shutdown	= riscv_clock_shutdown,
};
 78
/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	/* Read the free-running 64-bit time counter. */
	return get_cycles64();
}
 88
/* sched_clock read callback; notrace so it is safe to call from
 * the function tracer itself. */
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
 93
/* System-wide clocksource backed by the harts' synchronized time counter. */
static struct clocksource riscv_clocksource = {
	.name		= "riscv_clocksource",
	.rating		= 400,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= riscv_clocksource_rdtime,
#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
	/* Let the vDSO read the counter directly for fast gettimeofday(). */
	.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
#else
	.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
#endif
};
106
107static int riscv_timer_starting_cpu(unsigned int cpu)
108{
109	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
110
111	ce->cpumask = cpumask_of(cpu);
112	ce->irq = riscv_clock_event_irq;
113	if (riscv_timer_cannot_wake_cpu)
114		ce->features |= CLOCK_EVT_FEAT_C3STOP;
115	if (static_branch_likely(&riscv_sstc_available))
116		ce->rating = 450;
117	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
118
119	enable_percpu_irq(riscv_clock_event_irq,
120			  irq_get_trigger_type(riscv_clock_event_irq));
121	return 0;
122}
123
/* CPU hotplug teardown: mask the per-CPU timer interrupt on this hart. */
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}
129
/*
 * Expose the registered clocksource's mult/shift pair so other kernel
 * code can convert timer cycles to nanoseconds consistently.
 * Only meaningful after clocksource_register_hz() has run.
 */
void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
{
	*mult = riscv_clocksource.mult;
	*shift = riscv_clocksource.shift;
}
EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
136
137/* called directly from the low-level interrupt handler */
138static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
139{
140	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
141
142	riscv_clock_event_stop();
143	evdev->event_handler(evdev);
144
145	return IRQ_HANDLED;
146}
147
/*
 * Common (DT and ACPI) timer setup: resolve the per-hart timer IRQ from
 * the local interrupt controller, register the clocksource and
 * sched_clock, install the per-CPU interrupt handler, and hook CPU
 * hotplug bring-up/teardown.
 */
static int __init riscv_timer_init_common(void)
{
	int error;
	struct irq_domain *domain;
	struct fwnode_handle *intc_fwnode = riscv_get_intc_hwnode();

	/* The timer interrupt lives in the per-hart local INTC's domain. */
	domain = irq_find_matching_fwnode(intc_fwnode, DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find irq_domain for INTC node [%pfwP]\n",
		       intc_fwnode);
		return -ENODEV;
	}

	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
	if (!riscv_clock_event_irq) {
		pr_err("Failed to map timer interrupt for node [%pfwP]\n", intc_fwnode);
		return -ENODEV;
	}

	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
	if (error) {
		pr_err("RISCV timer registration failed [%d]\n", error);
		return error;
	}

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

	error = request_percpu_irq(riscv_clock_event_irq,
				    riscv_timer_interrupt,
				    "riscv-timer", &riscv_clock_event);
	if (error) {
		pr_err("registering percpu irq failed [%d]\n", error);
		return error;
	}

	/*
	 * Flip the static key before the hotplug callback runs so that
	 * riscv_timer_starting_cpu() observes sstc and bumps the rating.
	 */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
		static_branch_enable(&riscv_sstc_available);
	}

	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
			 "clockevents/riscv/timer:starting",
			 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
	if (error)
		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
		       error);

	return error;
}
197
198static int __init riscv_timer_init_dt(struct device_node *n)
199{
200	int cpuid, error;
201	unsigned long hartid;
202	struct device_node *child;
203
204	error = riscv_of_processor_hartid(n, &hartid);
205	if (error < 0) {
206		pr_warn("Invalid hartid for node [%pOF] error = [%lu]\n",
207			n, hartid);
208		return error;
209	}
210
211	cpuid = riscv_hartid_to_cpuid(hartid);
212	if (cpuid < 0) {
213		pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
214		return cpuid;
215	}
216
217	if (cpuid != smp_processor_id())
218		return 0;
219
220	child = of_find_compatible_node(NULL, NULL, "riscv,timer");
221	if (child) {
222		riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
223					"riscv,timer-cannot-wake-cpu");
224		of_node_put(child);
225	}
226
227	return riscv_timer_init_common();
228}
229
/* Probe from the "riscv" CPU node compatible during early boot. */
TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
231
#ifdef CONFIG_ACPI
/*
 * ACPI entry point: the RHCT table flag mirrors the DT
 * "riscv,timer-cannot-wake-cpu" property, then setup is shared with DT.
 */
static int __init riscv_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_rhct *rhct = (struct acpi_table_rhct *)table;

	riscv_timer_cannot_wake_cpu = rhct->flags & ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU;

	return riscv_timer_init_common();
}

TIMER_ACPI_DECLARE(aclint_mtimer, ACPI_SIG_RHCT, riscv_timer_acpi_init);

#endif
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2017 SiFive
  5 *
  6 * All RISC-V systems have a timer attached to every hart.  These timers can
  7 * either be read from the "time" and "timeh" CSRs, and can use the SBI to
  8 * setup events, or directly accessed using MMIO registers.
  9 */
 10
 11#define pr_fmt(fmt) "riscv-timer: " fmt
 12
 
 13#include <linux/clocksource.h>
 14#include <linux/clockchips.h>
 15#include <linux/cpu.h>
 16#include <linux/delay.h>
 17#include <linux/irq.h>
 18#include <linux/irqdomain.h>
 19#include <linux/module.h>
 20#include <linux/sched_clock.h>
 21#include <linux/io-64-nonatomic-lo-hi.h>
 22#include <linux/interrupt.h>
 23#include <linux/of_irq.h>
 
 24#include <clocksource/timer-riscv.h>
 25#include <asm/smp.h>
 26#include <asm/hwcap.h>
 27#include <asm/sbi.h>
 28#include <asm/timex.h>
 29
/* Enabled at init when the Sstc extension is detected; selects the
 * CSR-based (stimecmp) programming path over SBI timer calls. */
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
 
 
 
 
 
 
 
 
 
 
 
 
 31
 32static int riscv_clock_next_event(unsigned long delta,
 33		struct clock_event_device *ce)
 34{
 35	u64 next_tval = get_cycles64() + delta;
 36
 37	csr_set(CSR_IE, IE_TIE);
 38	if (static_branch_likely(&riscv_sstc_available)) {
 39#if defined(CONFIG_32BIT)
 40		csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
 41		csr_write(CSR_STIMECMPH, next_tval >> 32);
 42#else
 43		csr_write(CSR_STIMECMP, next_tval);
 44#endif
 45	} else
 46		sbi_set_timer(next_tval);
 47
 48	return 0;
 49}
 50
 
 
 
 
 
 
/* Linux IRQ number mapped from RV_IRQ_TIMER; the same mapping is used by
 * every hart (enabled per-CPU in riscv_timer_starting_cpu()). */
static unsigned int riscv_clock_event_irq;
/* Per-hart oneshot clock event device. */
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name			= "riscv_timer_clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 100,
	.set_next_event		= riscv_clock_next_event,
};
 58
/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	/* Read the free-running 64-bit time counter. */
	return get_cycles64();
}
 68
/* sched_clock read callback; notrace so it is safe to call from
 * the function tracer itself. */
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
 73
/* System-wide clocksource backed by the harts' synchronized time counter. */
static struct clocksource riscv_clocksource = {
	.name		= "riscv_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= riscv_clocksource_rdtime,
};
 81
 82static int riscv_timer_starting_cpu(unsigned int cpu)
 83{
 84	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
 85
 86	ce->cpumask = cpumask_of(cpu);
 87	ce->irq = riscv_clock_event_irq;
 
 
 
 
 88	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
 89
 90	enable_percpu_irq(riscv_clock_event_irq,
 91			  irq_get_trigger_type(riscv_clock_event_irq));
 92	return 0;
 93}
 94
/* CPU hotplug teardown: mask the per-CPU timer interrupt on this hart. */
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}
100
/*
 * Expose the registered clocksource's mult/shift pair so other kernel
 * code can convert timer cycles to nanoseconds consistently.
 * Only meaningful after clocksource_register_hz() has run.
 */
void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
{
	*mult = riscv_clocksource.mult;
	*shift = riscv_clocksource.shift;
}
EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
107
/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	/* Mask the timer interrupt; riscv_clock_next_event() sets IE_TIE
	 * again when the next event is armed. */
	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}
118
119static int __init riscv_timer_init_dt(struct device_node *n)
120{
121	int cpuid, error;
122	unsigned long hartid;
123	struct device_node *child;
124	struct irq_domain *domain;
 
125
126	error = riscv_of_processor_hartid(n, &hartid);
127	if (error < 0) {
128		pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
129			n, hartid);
130		return error;
131	}
132
133	cpuid = riscv_hartid_to_cpuid(hartid);
134	if (cpuid < 0) {
135		pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
136		return cpuid;
137	}
138
139	if (cpuid != smp_processor_id())
140		return 0;
141
142	domain = NULL;
143	child = of_get_compatible_child(n, "riscv,cpu-intc");
144	if (!child) {
145		pr_err("Failed to find INTC node [%pOF]\n", n);
146		return -ENODEV;
147	}
148	domain = irq_find_host(child);
149	of_node_put(child);
150	if (!domain) {
151		pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
 
152		return -ENODEV;
153	}
154
155	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
156	if (!riscv_clock_event_irq) {
157		pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
158		return -ENODEV;
159	}
160
161	pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
162	       __func__, cpuid, hartid);
163	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
164	if (error) {
165		pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
166		       error, cpuid);
167		return error;
168	}
169
170	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
171
172	error = request_percpu_irq(riscv_clock_event_irq,
173				    riscv_timer_interrupt,
174				    "riscv-timer", &riscv_clock_event);
175	if (error) {
176		pr_err("registering percpu irq failed [%d]\n", error);
177		return error;
178	}
179
 
 
 
 
 
180	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
181			 "clockevents/riscv/timer:starting",
182			 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
183	if (error)
184		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
185		       error);
186
187	if (riscv_isa_extension_available(NULL, SSTC)) {
188		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
189		static_branch_enable(&riscv_sstc_available);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190	}
191
192	return error;
193}
194
/* Probe from the "riscv" CPU node compatible during early boot. */
TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);