v6.2: arch/x86/include/asm/preempt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <asm/current.h>

#include <linux/thread_info.h>
#include <linux/static_call_types.h>

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = raw_cpu_read_4(pcpu_hot.preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (raw_cpu_cmpxchg_4(pcpu_hot.preempt_count, old, new) != old);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(pcpu_hot.preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(pcpu_hot.preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return GEN_UNARY_RMWcc("decl", pcpu_hot.preempt_count, e,
			       __percpu_arg([var]));
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPTION

extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_thunk(void);

#define preempt_schedule_dynamic_enabled	preempt_schedule_thunk
#define preempt_schedule_dynamic_disabled	NULL

extern asmlinkage void preempt_schedule_notrace(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);

#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace_thunk
#define preempt_schedule_notrace_dynamic_disabled	NULL

#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);

#define __preempt_schedule() \
do { \
	__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)

DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);

#define __preempt_schedule_notrace() \
do { \
	__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)

#else /* PREEMPT_DYNAMIC */

#define __preempt_schedule() \
	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT);

#define __preempt_schedule_notrace() \
	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT);

#endif /* PREEMPT_DYNAMIC */

#endif /* PREEMPTION */

#endif /* __ASM_PREEMPT_H */
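The comments above describe how the PREEMPT_NEED_RESCHED bit is kept inverted so that preempt_enable() can decrement and test for a needed reschedule in a single instruction. The following stand-alone C sketch models that accounting in user space; the model_* names, the MODEL_* constants and the main() driver are invented for illustration and are not part of the kernel API.

/* User-space model only: all model_* / MODEL_* names are made up for this sketch. */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NEED_RESCHED	0x80000000u
#define MODEL_ENABLED		(0 + MODEL_NEED_RESCHED)

static unsigned int model_count = MODEL_ENABLED;	/* stands in for pcpu_hot.preempt_count */

/* Clearing the bit marks "reschedule needed" (the bit is stored inverted). */
static void model_set_need_resched(void)   { model_count &= ~MODEL_NEED_RESCHED; }
static void model_clear_need_resched(void) { model_count |= MODEL_NEED_RESCHED; }

static void model_preempt_disable(void) { model_count++; }

/*
 * Models __preempt_count_dec_and_test(): because NEED_RESCHED is inverted,
 * a single decrement reaching zero means "no nesting left" and "a reschedule
 * is pending" at the same time.
 */
static bool model_dec_and_test(void)
{
	return --model_count == 0;
}

int main(void)
{
	model_preempt_disable();		/* count = MODEL_NEED_RESCHED + 1 */
	model_set_need_resched();		/* count = 1: resched pending, still nested */
	printf("%d\n", model_dec_and_test());	/* prints 1: caller should schedule */

	model_clear_need_resched();		/* count = MODEL_ENABLED again */
	model_preempt_disable();
	printf("%d\n", model_dec_and_test());	/* prints 0: nothing pending */
	return 0;
}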
v4.6: arch/x86/include/asm/preempt.h
 
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	raw_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule()					\
({								\
	register void *__sp asm(_ASM_SP);			\
	asm volatile ("call ___preempt_schedule" : "+r"(__sp));	\
})

  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace()					\
({									\
	register void *__sp asm(_ASM_SP);				\
	asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp));	\
})
  extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
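One difference visible between the two listings: in v4.6, preempt_count_set() is a plain raw_cpu_write_4(), so the store replaces whatever folded PREEMPT_NEED_RESCHED state was in the per-CPU count, while the v6.2 version loops on cmpxchg and preserves that bit. A minimal, self-contained user-space sketch of the two behaviours follows; the set_v4_style/set_v6_style helpers and the MODEL_NEED_RESCHED constant are invented names for illustration, not kernel functions.

/* Illustration only: these helpers model the two versions of preempt_count_set(). */
#include <stdio.h>

#define MODEL_NEED_RESCHED	0x80000000u

/* v4.6 style: plain store, any folded NEED_RESCHED state in 'cur' is lost. */
static unsigned int set_v4_style(unsigned int cur, unsigned int pc)
{
	(void)cur;		/* current value is ignored: straight overwrite */
	return pc;
}

/* v6.2 style: keep the NEED_RESCHED bit of the current value, take the rest from pc. */
static unsigned int set_v6_style(unsigned int cur, unsigned int pc)
{
	return (cur & MODEL_NEED_RESCHED) | (pc & ~MODEL_NEED_RESCHED);
}

int main(void)
{
	/* Bit clear in 'cur' means a reschedule is pending (the bit is inverted). */
	unsigned int cur = 2;
	unsigned int pc = 1u | MODEL_NEED_RESCHED;

	printf("v4.6 style: %#x\n", set_v4_style(cur, pc));	/* 0x80000001: pending resched forgotten */
	printf("v6.2 style: %#x\n", set_v6_style(cur, pc));	/* 0x1: pending resched kept */
	return 0;
}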