Linux Audio

Check our new training course

Loading...
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2012 ARM Ltd.
  4 * Author: Catalin Marinas <catalin.marinas@arm.com>
  5 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  6 * Copyright (C) 2021 SiFive
  7 */
  8#include <linux/compiler.h>
  9#include <linux/irqflags.h>
 10#include <linux/percpu.h>
 11#include <linux/preempt.h>
 12#include <linux/types.h>
 13
 14#include <asm/vector.h>
 15#include <asm/switch_to.h>
 16#include <asm/simd.h>
 17#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
 18#include <asm/asm-prototypes.h>
 19#endif
 20
/*
 * Publish a new Vector flag word for the current task.  WRITE_ONCE()
 * prevents store tearing so concurrent readers (e.g. trap handlers)
 * always observe a consistent value.
 */
static inline void riscv_v_flags_set(u32 flags)
{
	WRITE_ONCE(current->thread.riscv_v_flags, flags);
}
 25
 26static inline void riscv_v_start(u32 flags)
 27{
 28	int orig;
 29
 30	orig = riscv_v_flags();
 31	BUG_ON((orig & flags) != 0);
 32	riscv_v_flags_set(orig | flags);
 33	barrier();
 34}
 35
 36static inline void riscv_v_stop(u32 flags)
 37{
 38	int orig;
 39
 40	barrier();
 41	orig = riscv_v_flags();
 42	BUG_ON((orig & flags) == 0);
 43	riscv_v_flags_set(orig & ~flags);
 44}
 45
 46/*
 47 * Claim ownership of the CPU vector context for use by the calling context.
 48 *
 49 * The caller may freely manipulate the vector context metadata until
 50 * put_cpu_vector_context() is called.
 51 */
 52void get_cpu_vector_context(void)
 53{
 54	/*
 55	 * disable softirqs so it is impossible for softirqs to nest
 56	 * get_cpu_vector_context() when kernel is actively using Vector.
 57	 */
 58	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 59		local_bh_disable();
 60	else
 61		preempt_disable();
 62
 63	riscv_v_start(RISCV_KERNEL_MODE_V);
 64}
 65
 66/*
 67 * Release the CPU vector context.
 68 *
 69 * Must be called from a context in which get_cpu_vector_context() was
 70 * previously called, with no call to put_cpu_vector_context() in the
 71 * meantime.
 72 */
 73void put_cpu_vector_context(void)
 74{
 75	riscv_v_stop(RISCV_KERNEL_MODE_V);
 76
 77	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 78		local_bh_enable();
 79	else
 80		preempt_enable();
 81}
 82
 83#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
/* Direct pointer to the current task's Vector flag word. */
static __always_inline u32 *riscv_v_flags_ptr(void)
{
	return &current->thread.riscv_v_flags;
}
 88
/* Record that the live Vector registers hold unsaved kernel-mode state. */
static inline void riscv_preempt_v_set_dirty(void)
{
	*riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY;
}
 93
/* Drop both the dirty and the need-restore markers in one update. */
static inline void riscv_preempt_v_reset_flags(void)
{
	*riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE);
}
 98
/* Bump the context-nesting depth counter embedded in the flag word. */
static inline void riscv_v_ctx_depth_inc(void)
{
	*riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH;
}
103
/* Drop the context-nesting depth counter embedded in the flag word. */
static inline void riscv_v_ctx_depth_dec(void)
{
	*riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH;
}
108
/* Extract the current context-nesting depth from the flag word. */
static inline u32 riscv_v_ctx_get_depth(void)
{
	return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK;
}
113
114static int riscv_v_stop_kernel_context(void)
115{
116	if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current))
117		return 1;
118
119	riscv_preempt_v_clear_dirty(current);
120	riscv_v_stop(RISCV_PREEMPT_V);
121	return 0;
122}
123
/*
 * Begin a preemptible kernel Vector context for the current task.
 *
 * Returns 0 on success, -ENOENT when no kernel-mode save area has been
 * allocated (the caller then falls back to the non-preemptible path).
 * *@is_nested is set true when a preempt_v context was already active and
 * we only stacked a non-preemptible section on top of it.
 */
static int riscv_v_start_kernel_context(bool *is_nested)
{
	struct __riscv_v_ext_state *kvstate, *uvstate;

	kvstate = &current->thread.kernel_vstate;
	if (!kvstate->datap)
		return -ENOENT;

	if (riscv_preempt_v_started(current)) {
		/* Nesting is only expected at depth > 0, i.e. from a trap. */
		WARN_ON(riscv_v_ctx_get_depth() == 0);
		*is_nested = true;
		get_cpu_vector_context();
		if (riscv_preempt_v_dirty(current)) {
			/* Preserve the outer kernel-mode Vector state first. */
			__riscv_v_vstate_save(kvstate, kvstate->datap);
			riscv_preempt_v_clear_dirty(current);
		}
		/* The outer context must be reloaded when this section ends. */
		riscv_preempt_v_set_restore(current);
		return 0;
	}

	/* Transfer the ownership of V from user to kernel, then save */
	riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
	if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
		/* User state is live in the registers; save it before reuse. */
		uvstate = &current->thread.vstate;
		__riscv_v_vstate_save(uvstate, uvstate->datap);
	}
	riscv_preempt_v_clear_dirty(current);
	return 0;
}
153
/* low-level V context handling code, called with irq disabled */
asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
{
	int depth;

	/* Nothing to track unless a preemptible kernel V context is live. */
	if (!riscv_preempt_v_started(current))
		return;

	/*
	 * Only the outermost trap entry (depth 0) can interrupt live
	 * kernel-mode Vector state; mark it dirty so it is saved before
	 * any nested user of V clobbers the registers.
	 */
	depth = riscv_v_ctx_get_depth();
	if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
		riscv_preempt_v_set_dirty();

	riscv_v_ctx_depth_inc();
}
168
/*
 * Counterpart of riscv_v_context_nesting_start(): unwind one nesting
 * level on trap exit and, at the outermost level, restore any displaced
 * kernel-mode Vector state.
 */
asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs)
{
	struct __riscv_v_ext_state *vstate = &current->thread.kernel_vstate;
	u32 depth;

	/* Depth accounting is only sound with interrupts off. */
	WARN_ON(!irqs_disabled());

	if (!riscv_preempt_v_started(current))
		return;

	riscv_v_ctx_depth_dec();
	depth = riscv_v_ctx_get_depth();
	if (depth == 0) {
		/* Leaving the outermost trap: reload saved state if flagged. */
		if (riscv_preempt_v_restore(current)) {
			__riscv_v_vstate_restore(vstate, vstate->datap);
			__riscv_v_vstate_clean(regs);
			riscv_preempt_v_reset_flags();
		}
	}
}
189#else
190#define riscv_v_start_kernel_context(nested)	(-ENOENT)
191#define riscv_v_stop_kernel_context()		(-ENOENT)
192#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
193
194/*
195 * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
196 * context
197 *
198 * Must not be called unless may_use_simd() returns true.
199 * Task context in the vector registers is saved back to memory as necessary.
200 *
201 * A matching call to kernel_vector_end() must be made before returning from the
202 * calling context.
203 *
204 * The caller may freely use the vector registers until kernel_vector_end() is
205 * called.
206 */
void kernel_vector_begin(void)
{
	bool nested = false;

	if (WARN_ON(!has_vector()))
		return;

	BUG_ON(!may_use_simd());

	/*
	 * Prefer the preemptible context; a nonzero return (-ENOENT) means
	 * it is unavailable, so take the classic non-preemptible path and
	 * save the user state eagerly.
	 */
	if (riscv_v_start_kernel_context(&nested)) {
		get_cpu_vector_context();
		riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
	}

	/* Nested sections must not disturb the outer restore bookkeeping. */
	if (!nested)
		riscv_v_vstate_set_restore(current, task_pt_regs(current));

	riscv_v_enable();
}
226EXPORT_SYMBOL_GPL(kernel_vector_begin);
227
228/*
229 * kernel_vector_end(): give the CPU vector registers back to the current task
230 *
231 * Must be called from a context in which kernel_vector_begin() was previously
232 * called, with no call to kernel_vector_end() in the meantime.
233 *
234 * The caller must not use the vector registers after this function is called,
235 * unless kernel_vector_begin() is called again in the meantime.
236 */
void kernel_vector_end(void)
{
	if (WARN_ON(!has_vector()))
		return;

	/* Stop using V before handing the registers back. */
	riscv_v_disable();

	/*
	 * A nonzero return means no preempt_v context was stopped here, so
	 * release the non-preemptible context taken in kernel_vector_begin().
	 */
	if (riscv_v_stop_kernel_context())
		put_cpu_vector_context();
}
247EXPORT_SYMBOL_GPL(kernel_vector_end);
v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2012 ARM Ltd.
  4 * Author: Catalin Marinas <catalin.marinas@arm.com>
  5 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  6 * Copyright (C) 2021 SiFive
  7 */
  8#include <linux/compiler.h>
  9#include <linux/irqflags.h>
 10#include <linux/percpu.h>
 11#include <linux/preempt.h>
 12#include <linux/types.h>
 13
 14#include <asm/vector.h>
 15#include <asm/switch_to.h>
 16#include <asm/simd.h>
 17#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
 18#include <asm/asm-prototypes.h>
 19#endif
 20
/*
 * Publish a new Vector flag word for the current task.  WRITE_ONCE()
 * prevents store tearing so concurrent readers (e.g. trap handlers)
 * always observe a consistent value.
 */
static inline void riscv_v_flags_set(u32 flags)
{
	WRITE_ONCE(current->thread.riscv_v_flags, flags);
}
 25
 26static inline void riscv_v_start(u32 flags)
 27{
 28	int orig;
 29
 30	orig = riscv_v_flags();
 31	BUG_ON((orig & flags) != 0);
 32	riscv_v_flags_set(orig | flags);
 33	barrier();
 34}
 35
 36static inline void riscv_v_stop(u32 flags)
 37{
 38	int orig;
 39
 40	barrier();
 41	orig = riscv_v_flags();
 42	BUG_ON((orig & flags) == 0);
 43	riscv_v_flags_set(orig & ~flags);
 44}
 45
 46/*
 47 * Claim ownership of the CPU vector context for use by the calling context.
 48 *
 49 * The caller may freely manipulate the vector context metadata until
 50 * put_cpu_vector_context() is called.
 51 */
 52void get_cpu_vector_context(void)
 53{
 54	/*
 55	 * disable softirqs so it is impossible for softirqs to nest
 56	 * get_cpu_vector_context() when kernel is actively using Vector.
 57	 */
 58	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 59		local_bh_disable();
 60	else
 61		preempt_disable();
 62
 63	riscv_v_start(RISCV_KERNEL_MODE_V);
 64}
 65
 66/*
 67 * Release the CPU vector context.
 68 *
 69 * Must be called from a context in which get_cpu_vector_context() was
 70 * previously called, with no call to put_cpu_vector_context() in the
 71 * meantime.
 72 */
 73void put_cpu_vector_context(void)
 74{
 75	riscv_v_stop(RISCV_KERNEL_MODE_V);
 76
 77	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 78		local_bh_enable();
 79	else
 80		preempt_enable();
 81}
 82
 83#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
/* Direct pointer to the current task's Vector flag word. */
static __always_inline u32 *riscv_v_flags_ptr(void)
{
	return &current->thread.riscv_v_flags;
}
 88
/* Record that the live Vector registers hold unsaved kernel-mode state. */
static inline void riscv_preempt_v_set_dirty(void)
{
	*riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY;
}
 93
/* Drop both the dirty and the need-restore markers in one update. */
static inline void riscv_preempt_v_reset_flags(void)
{
	*riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE);
}
 98
/* Bump the context-nesting depth counter embedded in the flag word. */
static inline void riscv_v_ctx_depth_inc(void)
{
	*riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH;
}
103
/* Drop the context-nesting depth counter embedded in the flag word. */
static inline void riscv_v_ctx_depth_dec(void)
{
	*riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH;
}
108
/* Extract the current context-nesting depth from the flag word. */
static inline u32 riscv_v_ctx_get_depth(void)
{
	return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK;
}
113
114static int riscv_v_stop_kernel_context(void)
115{
116	if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current))
117		return 1;
118
119	riscv_preempt_v_clear_dirty(current);
120	riscv_v_stop(RISCV_PREEMPT_V);
121	return 0;
122}
123
/*
 * Begin a preemptible kernel Vector context for the current task.
 *
 * Returns 0 on success, -ENOENT when no kernel-mode save area has been
 * allocated (the caller then falls back to the non-preemptible path).
 * *@is_nested is set true when a preempt_v context was already active and
 * we only stacked a non-preemptible section on top of it.
 */
static int riscv_v_start_kernel_context(bool *is_nested)
{
	struct __riscv_v_ext_state *kvstate, *uvstate;

	kvstate = &current->thread.kernel_vstate;
	if (!kvstate->datap)
		return -ENOENT;

	if (riscv_preempt_v_started(current)) {
		/* Nesting is only expected at depth > 0, i.e. from a trap. */
		WARN_ON(riscv_v_ctx_get_depth() == 0);
		*is_nested = true;
		get_cpu_vector_context();
		if (riscv_preempt_v_dirty(current)) {
			/* Preserve the outer kernel-mode Vector state first. */
			__riscv_v_vstate_save(kvstate, kvstate->datap);
			riscv_preempt_v_clear_dirty(current);
		}
		/* The outer context must be reloaded when this section ends. */
		riscv_preempt_v_set_restore(current);
		return 0;
	}

	/* Transfer the ownership of V from user to kernel, then save */
	riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
	if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
		/* User state is live in the registers; save it before reuse. */
		uvstate = &current->thread.vstate;
		__riscv_v_vstate_save(uvstate, uvstate->datap);
	}
	riscv_preempt_v_clear_dirty(current);
	return 0;
}
153
/* low-level V context handling code, called with irq disabled */
asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
{
	int depth;

	/* Nothing to track unless a preemptible kernel V context is live. */
	if (!riscv_preempt_v_started(current))
		return;

	/*
	 * Only the outermost trap entry (depth 0) can interrupt live
	 * kernel-mode Vector state; mark it dirty so it is saved before
	 * any nested user of V clobbers the registers.
	 */
	depth = riscv_v_ctx_get_depth();
	if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
		riscv_preempt_v_set_dirty();

	riscv_v_ctx_depth_inc();
}
168
/*
 * Counterpart of riscv_v_context_nesting_start(): unwind one nesting
 * level on trap exit and, at the outermost level, restore any displaced
 * kernel-mode Vector state.
 */
asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs)
{
	struct __riscv_v_ext_state *vstate = &current->thread.kernel_vstate;
	u32 depth;

	/* Depth accounting is only sound with interrupts off. */
	WARN_ON(!irqs_disabled());

	if (!riscv_preempt_v_started(current))
		return;

	riscv_v_ctx_depth_dec();
	depth = riscv_v_ctx_get_depth();
	if (depth == 0) {
		/* Leaving the outermost trap: reload saved state if flagged. */
		if (riscv_preempt_v_restore(current)) {
			__riscv_v_vstate_restore(vstate, vstate->datap);
			__riscv_v_vstate_clean(regs);
			riscv_preempt_v_reset_flags();
		}
	}
}
189#else
190#define riscv_v_start_kernel_context(nested)	(-ENOENT)
191#define riscv_v_stop_kernel_context()		(-ENOENT)
192#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
193
194/*
195 * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
196 * context
197 *
198 * Must not be called unless may_use_simd() returns true.
199 * Task context in the vector registers is saved back to memory as necessary.
200 *
201 * A matching call to kernel_vector_end() must be made before returning from the
202 * calling context.
203 *
204 * The caller may freely use the vector registers until kernel_vector_end() is
205 * called.
206 */
void kernel_vector_begin(void)
{
	bool nested = false;

	if (WARN_ON(!has_vector()))
		return;

	BUG_ON(!may_use_simd());

	/*
	 * Prefer the preemptible context; a nonzero return (-ENOENT) means
	 * it is unavailable, so take the classic non-preemptible path and
	 * save the user state eagerly.
	 */
	if (riscv_v_start_kernel_context(&nested)) {
		get_cpu_vector_context();
		riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
	}

	/* Nested sections must not disturb the outer restore bookkeeping. */
	if (!nested)
		riscv_v_vstate_set_restore(current, task_pt_regs(current));

	riscv_v_enable();
}
226EXPORT_SYMBOL_GPL(kernel_vector_begin);
227
228/*
229 * kernel_vector_end(): give the CPU vector registers back to the current task
230 *
231 * Must be called from a context in which kernel_vector_begin() was previously
232 * called, with no call to kernel_vector_end() in the meantime.
233 *
234 * The caller must not use the vector registers after this function is called,
235 * unless kernel_vector_begin() is called again in the meantime.
236 */
void kernel_vector_end(void)
{
	if (WARN_ON(!has_vector()))
		return;

	/* Stop using V before handing the registers back. */
	riscv_v_disable();

	/*
	 * A nonzero return means no preempt_v context was stopped here, so
	 * release the non-preemptible context taken in kernel_vector_begin().
	 */
	if (riscv_v_stop_kernel_context())
		put_cpu_vector_context();
}
247EXPORT_SYMBOL_GPL(kernel_vector_end);