// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
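
/*
 * Usage sketch (illustrative only, not part of this file): open-coded
 * iteration over the online mask with cpumask_next().  Real callers
 * normally use the for_each_cpu() helpers, which are built on top of
 * this primitive.
 *
 *	int cpu;
 *
 *	for (cpu = cpumask_next(-1, cpu_online_mask);
 *	     cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, cpu_online_mask))
 *		pr_info("cpu %d is online\n", cpu);
 */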

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
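
/*
 * Usage sketch (illustrative only): advance to the next CPU that is both
 * in a caller-owned affinity mask and currently online.  "affinity_mask"
 * and "prev_cpu" are hypothetical caller state, not part of this file.
 *
 *	int cpu;
 *
 *	cpu = cpumask_next_and(prev_cpu, affinity_mask, cpu_online_mask);
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_first_and(affinity_mask, cpu_online_mask);
 */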

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
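
/*
 * Usage sketch (illustrative only): pick some CPU other than the current
 * one to push work to, e.g. when the local CPU is about to go offline.
 * "my_work" is a hypothetical, already-initialised work_struct.
 *
 *	unsigned int target;
 *
 *	target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *	if (target < nr_cpu_ids)
 *		queue_work_on(target, system_wq, &my_work);
 */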

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
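
/*
 * Usage sketch (illustrative only): cpumask_next_wrap() exists to back
 * for_each_cpu_wrap(), which walks the whole mask but starts at a
 * caller-chosen position.  Rotating the start point spreads work instead
 * of always landing on the lowest-numbered CPU.  "rotor" and
 * try_claim_cpu() are hypothetical caller state, not part of this file.
 *
 *	int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, rotor) {
 *		if (try_claim_cpu(cpu)) {
 *			rotor = cpu + 1;
 *			break;
 *		}
 *	}
 */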

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
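
/*
 * Usage sketch (illustrative only): allocate the (possibly off-stack)
 * mask on the NUMA node a device lives on, so later accesses stay local.
 * "dev" is a hypothetical struct device owned by the caller.
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var_node(&mask, GFP_KERNEL, dev_to_node(dev)))
 *		return -ENOMEM;
 */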

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
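
/*
 * Usage sketch (illustrative only): the common allocate/use/free pattern.
 * With CONFIG_CPUMASK_OFFSTACK=n these calls collapse to a plain struct
 * cpumask on the stack and cannot fail.
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(tmp, cpu_online_mask, cpu_active_mask);
 *	... use tmp ...
 *	free_cpumask_var(tmp);
 */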

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}
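
/*
 * Usage sketch (illustrative only): very early boot code, before the slab
 * allocator is up, can take its mask from memblock and hand it back once
 * init is done.  "early_mask" is a hypothetical __initdata variable.
 *
 *	alloc_bootmem_cpumask_var(&early_mask);
 *	cpumask_copy(early_mask, cpu_possible_mask);
 *	...
 *	free_bootmem_cpumask_var(early_mask);
 */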

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local NUMA cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
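
/*
 * Usage sketch (illustrative only): spreading per-queue affinities across
 * CPUs, preferring the device's own NUMA node.  "nr_queues", "dev" and
 * set_queue_affinity() are hypothetical caller state, not part of this
 * file.
 *
 *	unsigned int q;
 *
 *	for (q = 0; q < nr_queues; q++)
 *		set_queue_affinity(q, cpumask_local_spread(q, dev_to_node(dev)));
 */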

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within *src1p & *src2p.
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
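
/*
 * Usage sketch (illustrative only): choose a CPU from the intersection of
 * a task's allowed mask and the active mask, with successive calls fanned
 * out over that intersection rather than piling onto the first CPU.
 * "p" is a hypothetical task_struct pointer.
 *
 *	int cpu;
 *
 *	cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_active_mask);
 *	if (cpu >= nr_cpu_ids)
 *		return -EAGAIN;
 */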

int cpumask_any_distribute(const struct cpumask *srcp)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);