// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Return: >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
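
/*
 * A minimal usage sketch (editor's illustration, not part of this file's
 * API; the function name below is hypothetical): for_each_cpu_wrap() is
 * built on cpumask_next_wrap() and visits every CPU set in @mask exactly
 * once, starting at @start and wrapping past the last bit.
 */
static __maybe_unused void example_walk_wrapped(const struct cpumask *mask,
						unsigned int start)
{
	unsigned int cpu;

	/* Visits start..nr_cpu_ids-1 first, then wraps to 0..start-1. */
	for_each_cpu_wrap(cpu, mask, start)
		pr_debug("visiting cpu %u\n", cpu);
}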

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * Return: TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, and eliminates the dead code in that
 * case too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif
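
/*
 * Lifecycle sketch (editor's illustration; example_count_present() is a
 * hypothetical caller): with CONFIG_CPUMASK_OFFSTACK=y the helpers above
 * really allocate, while with it disabled they are no-ops and the mask
 * lives on the stack, so callers must be written to work either way.
 */
static __maybe_unused int example_count_present(void)
{
	cpumask_var_t tmp;
	int weight;

	/* Assumes process context where GFP_KERNEL may sleep. */
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_present_mask);
	weight = cpumask_weight(tmp);

	free_cpumask_var(tmp);		/* safe in both configurations */
	return weight;
}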

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Return: online CPU according to a numa aware policy; local cpus are returned
 * first, followed by non-local ones, then it wraps around.
 *
 * For those who want to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 *	for (i = 0; i < num_online_cpus(); i++) {
 *		cpu = cpumask_local_spread(i, node);
 *		do_something(cpu);
 *	}
 *
 * there's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * The iterator form is more verbose, but asymptotically cheaper: its
 * complexity is O(sched_domains_numa_levels * nr_cpu_ids), while
 * cpumask_local_spread() called for each cpu is
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
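
/*
 * Usage sketch (editor's illustration; example_map_queues() and nr_queues
 * are hypothetical): a typical caller spreads per-queue work NUMA-locally,
 * the way drivers pick CPUs for their queue interrupts.
 */
static __maybe_unused void example_map_queues(unsigned int nr_queues, int node)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++) {
		unsigned int cpu = cpumask_local_spread(q, node);

		/* CPUs of @node come out first, then the remote ones. */
		pr_debug("queue %u -> cpu %u\n", q, cpu);
	}
}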

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &cpumask for intersection
 * @src2p: second &cpumask for intersection
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
				      nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
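
/*
 * Usage sketch (editor's illustration; example_pick_cpu() is hypothetical):
 * distribute repeated picks over @allowed & online instead of always
 * landing on the first set bit, as cpumask_first_and() would.
 */
static __maybe_unused unsigned int example_pick_cpu(const struct cpumask *allowed)
{
	unsigned int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

	/* Empty intersection: fall back to any online CPU. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	return cpu;
}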

/**
 * cpumask_any_distribute - Return an arbitrary cpu from @srcp
 * @srcp: &cpumask for selection
 *
 * Return: >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
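
/*
 * Usage sketch (editor's illustration; the loop below is hypothetical):
 * successive calls walk @srcp round-robin (tracked per CPU), which spreads
 * placement instead of piling everything onto the mask's first CPU.
 */
static __maybe_unused void example_spread_picks(const struct cpumask *srcp)
{
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int cpu = cpumask_any_distribute(srcp);

		if (cpu >= nr_cpu_ids)
			break;	/* @srcp is empty */
		pr_debug("pick %d -> cpu %u\n", i, cpu);
	}
}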