v5.14.15 (lib/cpumask.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
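
This is the primitive behind the for_each_cpu() iterator in <linux/cpumask.h>. As a minimal sketch (illustrative only, not part of this file), hand-rolled iteration looks like this; the function name is invented:

/*
 * Sketch only: count online CPUs by calling cpumask_next() directly.
 * Starting at -1 makes the first call examine bit 0, which is the
 * same pattern the for_each_cpu() macro expands to.
 */
static unsigned int count_online_cpus_by_hand(void)
{
	unsigned int count = 0;
	int cpu = -1;

	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
		count++;

	return count;	/* matches num_online_cpus() */
}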

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
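
A typical caller wants the next CPU satisfying two masks at once without materializing a temporary mask for the AND; for instance (a hypothetical helper, sketched for illustration):

/*
 * Sketch only: next CPU after @prev that is both in @affinity and
 * currently online; avoids allocating a scratch cpumask for the AND.
 */
static int next_allowed_cpu(int prev, const struct cpumask *affinity)
{
	return cpumask_next_and(prev, affinity, cpu_online_mask);
}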

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
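
As the comment suggests, the usual pattern excludes the local CPU when choosing a target for cross-CPU work; a minimal sketch (the helper name is invented):

/*
 * Sketch only: pick any online CPU other than the current one.
 * Callers need preemption disabled for smp_processor_id() to be
 * stable; returns >= nr_cpu_ids on a single-CPU system.
 */
static unsigned int pick_peer_cpu(void)
{
	return cpumask_any_but(cpu_online_mask, smp_processor_id());
}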

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
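
The consumer of this helper is the for_each_cpu_wrap() macro, which scans a whole mask starting from an arbitrary CPU and wrapping past the end. A sketch of round-robin placement built on it; cpu_is_acceptable() is a hypothetical predicate:

/*
 * Sketch only: scan all of @mask beginning just after the previously
 * used CPU, so repeated calls rotate through the mask fairly.
 */
static int pick_next_cpu(const struct cpumask *mask, int prev_cpu)
{
	int cpu;

	for_each_cpu_wrap(cpu, mask, prev_cpu + 1) {
		if (cpu_is_acceptable(cpu))	/* hypothetical predicate */
			return cpu;
	}

	return -ENODEV;	/* mask empty or nothing acceptable */
}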

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
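
These allocators pair with free_cpumask_var() below; a minimal sketch of the canonical lifecycle (the function name is invented for illustration):

/*
 * Sketch only: the standard cpumask_var_t pattern. When
 * CONFIG_CPUMASK_OFFSTACK=n the variable lives on the stack and the
 * alloc/free calls compile away, which is why the check is mandatory.
 */
static int demo_mask_lifecycle(void)
{
	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
	/* ... use tmp ... */

	free_cpumask_var(tmp);
	return 0;
}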

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}
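
Boot-time callers run before the slab allocator exists, so this variant draws from memblock and panics rather than returning failure; a minimal sketch of early use (the function name is invented):

/*
 * Sketch only: early-boot allocation. No error check is needed,
 * since failure panics inside alloc_bootmem_cpumask_var().
 */
static cpumask_var_t early_scratch_mask;

static void __init demo_early_mask_setup(void)
{
	alloc_bootmem_cpumask_var(&early_scratch_mask);
	cpumask_copy(early_scratch_mask, cpu_possible_mask);
}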

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
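
A typical caller is device setup code spreading per-queue interrupts or worker threads across CPUs near the device; a minimal sketch (nr_queues, dev_node and queue_cpu stand in for real driver state):

/*
 * Sketch only: give each queue a CPU, preferring the device's NUMA
 * node and wrapping when there are more queues than online CPUs.
 */
static void assign_queue_cpus(unsigned int nr_queues, int dev_node,
			      unsigned int *queue_cpu)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++)
		queue_cpu[q] = cpumask_local_spread(q, dev_node);
}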

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - return an arbitrary cpu within *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
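
Because the cursor is per-CPU state, repeated calls from the same CPU walk the intersection instead of always returning its first bit; a minimal sketch (the helper name is invented):

/*
 * Sketch only: choose a target CPU from @allowed that is online,
 * rotating across calls so work does not pile onto one CPU.
 */
static int pick_target_cpu(const struct cpumask *allowed)
{
	return cpumask_any_and_distribute(allowed, cpu_online_mask);
}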

int cpumask_any_distribute(const struct cpumask *srcp)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
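
The single-mask variant shares the same per-CPU cursor; a minimal sketch of spreading worker homes over a mask (the helper name is invented):

/*
 * Sketch only: place each new worker on a different CPU of @mask,
 * wrapping around once the mask is exhausted.
 */
static int next_worker_cpu(const struct cpumask *mask)
{
	int cpu = cpumask_any_distribute(mask);

	return cpu < nr_cpu_ids ? cpu : -ENODEV;	/* empty mask */
}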
v6.2 (lib/cpumask.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		cpu = cpumask_nth(i, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			return cpu;
	} else {
		/* NUMA first. */
		cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
		if (cpu < nr_cpu_ids)
			return cpu;

		i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));

		/* Skip NUMA nodes, done above. */
		cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
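
The v6.2 version replaces the counting loops with direct nth-bit lookups: the i'th local online CPU is fetched with cpumask_nth_and(), and when i is past the local set, the local count from cpumask_weight_and() is subtracted so the remainder indexes the non-local CPUs via cpumask_nth_andnot(). The same indexing idea over plain arrays, as an illustrative sketch:

/*
 * Sketch only: the two-level indexing used above, written with
 * ordinary arrays. local[] holds the preferred CPUs, remote[] the
 * rest; index @i selects locals first, then spills into remotes.
 */
static unsigned int nth_local_first(unsigned int i,
				    const unsigned int *local, unsigned int n_local,
				    const unsigned int *remote)
{
	if (i < n_local)
		return local[i];	/* the cpumask_nth_and() case */
	return remote[i - n_local];	/* cpumask_nth_andnot() after the
					 * cpumask_weight_and() subtraction */
}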

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - return an arbitrary cpu within *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
					nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
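
find_next_and_bit_wrap() collapses the older "search forward, then retry from bit 0" sequence into a single call. Its contract can be written out with the plain forward search, as a sketch (mirroring, not quoting, the helper in <linux/find.h>):

/*
 * Sketch only: wrap-around search over two ANDed bitmaps. Looks in
 * [offset, size) first, then in [0, offset); returns @size if the
 * intersection is empty.
 */
static unsigned long demo_find_and_wrap(const unsigned long *a1,
					const unsigned long *a2,
					unsigned long size, unsigned long offset)
{
	unsigned long bit = find_next_and_bit(a1, a2, size, offset);

	if (bit < size)
		return bit;

	bit = find_next_and_bit(a1, a2, offset, 0);
	return bit < offset ? bit : size;
}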

unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);