// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

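/*
 * Illustrative sketch (not part of this file): open-coded iteration over a
 * mask with cpumask_next(), roughly what for_each_cpu() expands to. The
 * choice of cpu_online_mask and the per-cpu action are assumptions made
 * for the example only.
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("online cpu %d\n", cpu);
 */
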
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

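/*
 * Illustrative sketch: walking the intersection of two masks with
 * cpumask_next_and(), as for_each_cpu_and() does. "allowed" is a
 * hypothetical caller-provided mask; the body is a placeholder.
 *
 *	static void visit_allowed_online(const struct cpumask *allowed)
 *	{
 *		int cpu = -1;
 *
 *		while ((cpu = cpumask_next_and(cpu, allowed,
 *					       cpu_online_mask)) < nr_cpu_ids)
 *			pr_info("cpu %d is allowed and online\n", cpu);
 *	}
 */
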
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);

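/*
 * Illustrative sketch: picking a peer cpu other than the current one, for
 * example to hand off work. "my_work" is an assumed work item; the check
 * against nr_cpu_ids covers the case where no other cpu is available.
 *
 *	int peer = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *
 *	if (peer < nr_cpu_ids)
 *		queue_work_on(peer, system_wq, &my_work);
 */
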
/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;
	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

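/*
 * Illustrative sketch: a round-robin scan over @mask that begins at @start
 * and wraps back to the lowest set cpu, which is the pattern
 * for_each_cpu_wrap() is built from. "mask", "start" and try_cpu() are
 * assumed caller context, not real kernel symbols.
 *
 *	int cpu;
 *
 *	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, mask, start, true))
 *		try_cpu(cpu);
 */
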
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

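/*
 * Illustrative sketch: allocating a per-device mask on the device's memory
 * node. "dev" and the error path are assumptions made for the example.
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var_node(&mask, GFP_KERNEL, dev_to_node(dev)))
 *		return -ENOMEM;
 *	// ... fill and use mask ...
 *	free_cpumask_var(mask);
 */
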
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

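/*
 * Illustrative sketch of the usual allocate/use/free pattern. With
 * CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a one-element array on the
 * stack and the "allocation" in <linux/cpumask.h> always reports success,
 * so callers are written the same way for both configurations.
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
 *	// ... use tmp ...
 *	free_cpumask_var(tmp);
 */
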
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu, hk_flags;
	const struct cpumask *mask;

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
	i %= cpumask_weight(mask);

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, mask) {
			if (i-- == 0)
				return cpu;
		}
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
			if (i-- == 0)
				return cpu;
		}

		for_each_cpu(cpu, mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);

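/*
 * Illustrative sketch: spreading queue interrupts so that the first queues
 * land on cpus local to the device's node. The device, queue count and
 * queue_irq[] array are assumptions for the example, not from a real driver.
 *
 *	for (q = 0; q < nqueues; q++) {
 *		unsigned int cpu = cpumask_local_spread(q, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
 *	}
 */
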
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - return an arbitrary cpu within src1p & src2p.
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
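
/*
 * Illustrative sketch: choosing a target cpu for each new item so that
 * repeated picks from the same caller spread across the allowed, online
 * cpus rather than piling onto the first one. "allowed" is an assumed
 * caller-provided mask.
 *
 *	int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		return -ENODEV;		// intersection is empty
 *	return cpu;
 */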