/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
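
/*
 * A concrete sketch of that failure mode (illustrative only, not code
 * from this file): if retry() were switched on before begin(), a reader
 * looping with interrupts disabled would spin forever once the task's
 * seqcount is non-zero:
 *
 *	local_irq_disable();
 *	do {
 *		seq = read_mems_allowed_begin();    <- key off: seq == 0
 *		...
 *	} while (read_mems_allowed_retry(seq));     <- key on: compares the
 *						       live seqcount with 0,
 *						       so it keeps returning
 *						       true
 */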
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general. E.g. movable only node which cannot satisfy
 * any non movable allocations (see update_nodemask). Page allocator
 * needs to make additional checks for those configurations and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}

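/*
 * A sketch of the intended caller-side guard (assumed pattern, not code
 * from this file): the extra validation is only paid for when the static
 * key says an unsupportable configuration exists.
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
 *		... check for / warn about a nodemask with no usable zones ...
 *	}
 */
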
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

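/*
 * Typical zonelist-scan usage (a sketch of the allocator-side pattern,
 * not code from this file). Hot paths that iterate many zones test
 * cpusets_enabled() themselves and gate the double-underscore variant
 * on it, rather than calling the cpuset_zone_allowed() wrapper per zone:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		...
 *	}
 */
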
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

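/*
 * Canonical retry-loop usage of the pair above (a sketch of the pattern
 * used by allocation paths, not code from this file):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = ...attempt allocation using mems_allowed...;
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */

/*
 * Writer side of mems_allowed_seq. Interrupts stay disabled across the
 * write section so that a reader entering read_mems_allowed_retry() from
 * interrupt context cannot interrupt, and then spin against, a
 * half-finished update on the same CPU.
 */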
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline bool cpuset_cpu_is_isolated(int cpu)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */