lib/bitmap.c, Linux v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * lib/bitmap.c
  4 * Helper functions for bitmap.h.
  5 */
  6
  7#include <linux/bitmap.h>
  8#include <linux/bitops.h>
  9#include <linux/ctype.h>
 10#include <linux/device.h>
 11#include <linux/export.h>
 12#include <linux/slab.h>
 13
 14/**
 15 * DOC: bitmap introduction
 16 *
 17 * bitmaps provide an array of bits, implemented using an
 18 * array of unsigned longs.  The number of valid bits in a
 19 * given bitmap does _not_ need to be an exact multiple of
 20 * BITS_PER_LONG.
 21 *
 22 * The possible unused bits in the last, partially used word
 23 * of a bitmap are 'don't care'.  The implementation makes
 24 * no particular effort to keep them zero.  It ensures that
 25 * their value will not affect the results of any operation.
 26 * The bitmap operations that return Boolean (bitmap_empty,
 27 * for example) or scalar (bitmap_weight, for example) results
 28 * carefully filter out these unused bits from impacting their
 29 * results.
 30 *
 31 * The byte ordering of bitmaps is more natural on little
 32 * endian architectures.  See the big-endian headers
 33 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
 34 * for the best explanations of this ordering.
 35 */
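
/*
 * Editor's illustration (not part of the upstream file): a minimal
 * sketch of the "don't care" tail bits described above.  A 100-bit map
 * needs two 64-bit words (or four 32-bit ones); deliberately dirtying
 * the unused tail of the last word changes neither bitmap_weight() nor
 * bitmap_equal(), because both mask the final partial word with
 * BITMAP_LAST_WORD_MASK().  The *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_tail_bits_demo(void)
{
	DECLARE_BITMAP(a, 100);
	DECLARE_BITMAP(b, 100);

	bitmap_zero(a, 100);
	bitmap_zero(b, 100);
	bitmap_set(a, 96, 4);		/* bits 96..99 */
	bitmap_set(b, 96, 4);

	/* Dirty the unused bits beyond bit 99 in the last word of 'a'. */
	a[BITS_TO_LONGS(100) - 1] |= ~BITMAP_LAST_WORD_MASK(100);

	return bitmap_weight(a, 100) == 4 && bitmap_equal(a, b, 100);
}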
 36
 37bool __bitmap_equal(const unsigned long *bitmap1,
 38		    const unsigned long *bitmap2, unsigned int bits)
 39{
 40	unsigned int k, lim = bits/BITS_PER_LONG;
 41	for (k = 0; k < lim; ++k)
 42		if (bitmap1[k] != bitmap2[k])
 43			return false;
 44
 45	if (bits % BITS_PER_LONG)
 46		if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
 47			return false;
 48
 49	return true;
 50}
 51EXPORT_SYMBOL(__bitmap_equal);
 52
 53bool __bitmap_or_equal(const unsigned long *bitmap1,
 54		       const unsigned long *bitmap2,
 55		       const unsigned long *bitmap3,
 56		       unsigned int bits)
 57{
 58	unsigned int k, lim = bits / BITS_PER_LONG;
 59	unsigned long tmp;
 60
 61	for (k = 0; k < lim; ++k) {
 62		if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
 63			return false;
 64	}
 65
 66	if (!(bits % BITS_PER_LONG))
 67		return true;
 68
 69	tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
 70	return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
 71}
 72
 73void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
 74{
 75	unsigned int k, lim = BITS_TO_LONGS(bits);
 76	for (k = 0; k < lim; ++k)
 77		dst[k] = ~src[k];
 78}
 79EXPORT_SYMBOL(__bitmap_complement);
 80
 81/**
 82 * __bitmap_shift_right - logical right shift of the bits in a bitmap
 83 *   @dst : destination bitmap
 84 *   @src : source bitmap
 85 *   @shift : shift by this many bits
 86 *   @nbits : bitmap size, in bits
 87 *
 88 * Shifting right (dividing) means moving bits in the MS -> LS bit
 89 * direction.  Zeros are fed into the vacated MS positions and the
 90 * LS bits shifted off the bottom are lost.
 91 */
 92void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
 93			unsigned shift, unsigned nbits)
 94{
 95	unsigned k, lim = BITS_TO_LONGS(nbits);
 96	unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
 97	unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
 98	for (k = 0; off + k < lim; ++k) {
 99		unsigned long upper, lower;
100
101		/*
102		 * If shift is not word aligned, take lower rem bits of
103		 * word above and make them the top rem bits of result.
104		 */
105		if (!rem || off + k + 1 >= lim)
106			upper = 0;
107		else {
108			upper = src[off + k + 1];
109			if (off + k + 1 == lim - 1)
110				upper &= mask;
111			upper <<= (BITS_PER_LONG - rem);
112		}
113		lower = src[off + k];
114		if (off + k == lim - 1)
115			lower &= mask;
116		lower >>= rem;
117		dst[k] = lower | upper;
118	}
119	if (off)
120		memset(&dst[lim - off], 0, off*sizeof(unsigned long));
121}
122EXPORT_SYMBOL(__bitmap_shift_right);
123
124
125/**
126 * __bitmap_shift_left - logical left shift of the bits in a bitmap
127 *   @dst : destination bitmap
128 *   @src : source bitmap
129 *   @shift : shift by this many bits
130 *   @nbits : bitmap size, in bits
131 *
132 * Shifting left (multiplying) means moving bits in the LS -> MS
133 * direction.  Zeros are fed into the vacated LS bit positions
134 * and those MS bits shifted off the top are lost.
135 */
136
137void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
138			unsigned int shift, unsigned int nbits)
139{
140	int k;
141	unsigned int lim = BITS_TO_LONGS(nbits);
142	unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
143	for (k = lim - off - 1; k >= 0; --k) {
144		unsigned long upper, lower;
145
146		/*
147		 * If shift is not word aligned, take upper rem bits of
148		 * word below and make them the bottom rem bits of result.
149		 */
150		if (rem && k > 0)
151			lower = src[k - 1] >> (BITS_PER_LONG - rem);
152		else
153			lower = 0;
154		upper = src[k] << rem;
155		dst[k + off] = lower | upper;
156	}
157	if (off)
158		memset(dst, 0, off*sizeof(unsigned long));
159}
160EXPORT_SYMBOL(__bitmap_shift_left);
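
/*
 * Editor's illustration (not part of the upstream file): a sketch of the
 * bitmap_shift_right()/bitmap_shift_left() wrappers from <linux/bitmap.h>,
 * which fall back to the two helpers above for bitmaps that do not fit a
 * single word.  The *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_shift_demo(void)
{
	DECLARE_BITMAP(src, 128);
	DECLARE_BITMAP(dst, 128);

	bitmap_zero(src, 128);
	__set_bit(70, src);

	bitmap_shift_right(dst, src, 3, 128);	/* bit 70 -> bit 67 */
	bitmap_shift_left(src, dst, 10, 128);	/* bit 67 -> bit 77 */

	return test_bit(77, src);
}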
161
162/**
163 * bitmap_cut() - remove bit region from bitmap and right shift remaining bits
164 * @dst: destination bitmap, might overlap with src
165 * @src: source bitmap
166 * @first: start bit of region to be removed
167 * @cut: number of bits to remove
168 * @nbits: bitmap size, in bits
169 *
170 * Set the n-th bit of @dst iff the n-th bit of @src is set and
171 * n is less than @first, or the m-th bit of @src is set for any
 172 * m such that @first <= n < @nbits, and m = n + @cut.
173 *
174 * In pictures, example for a big-endian 32-bit architecture:
175 *
176 * The @src bitmap is::
177 *
178 *   31                                   63
179 *   |                                    |
180 *   10000000 11000001 11110010 00010101  10000000 11000001 01110010 00010101
181 *                   |  |              |                                    |
182 *                  16  14             0                                   32
183 *
184 * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
185 *
186 *   31                                   63
187 *   |                                    |
188 *   10110000 00011000 00110010 00010101  00010000 00011000 00101110 01000010
189 *                      |              |                                    |
190 *                      14 (bit 17     0                                   32
191 *                          from @src)
192 *
193 * Note that @dst and @src might overlap partially or entirely.
194 *
195 * This is implemented in the obvious way, with a shift and carry
196 * step for each moved bit. Optimisation is left as an exercise
197 * for the compiler.
198 */
199void bitmap_cut(unsigned long *dst, const unsigned long *src,
200		unsigned int first, unsigned int cut, unsigned int nbits)
201{
202	unsigned int len = BITS_TO_LONGS(nbits);
203	unsigned long keep = 0, carry;
204	int i;
205
206	if (first % BITS_PER_LONG) {
207		keep = src[first / BITS_PER_LONG] &
208		       (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
209	}
210
211	memmove(dst, src, len * sizeof(*dst));
212
213	while (cut--) {
214		for (i = first / BITS_PER_LONG; i < len; i++) {
215			if (i < len - 1)
216				carry = dst[i + 1] & 1UL;
217			else
218				carry = 0;
219
220			dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
221		}
222	}
223
224	dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
225	dst[first / BITS_PER_LONG] |= keep;
226}
227EXPORT_SYMBOL(bitmap_cut);
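
/*
 * Editor's illustration (not part of the upstream file): cutting a
 * three-bit hole with bitmap_cut().  With bits 2, 5 and 9 set, removing
 * bits 4..6 keeps bit 2 in place, drops bit 5, and pulls bit 9 down to
 * bit 6.  The *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_cut_demo(void)
{
	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);
	__set_bit(2, map);
	__set_bit(5, map);
	__set_bit(9, map);

	bitmap_cut(map, map, 4, 3, 64);		/* @dst may equal @src */

	return test_bit(2, map) && test_bit(6, map) && !test_bit(5, map);
}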
228
229bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
230				const unsigned long *bitmap2, unsigned int bits)
231{
232	unsigned int k;
233	unsigned int lim = bits/BITS_PER_LONG;
234	unsigned long result = 0;
235
236	for (k = 0; k < lim; k++)
237		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
238	if (bits % BITS_PER_LONG)
239		result |= (dst[k] = bitmap1[k] & bitmap2[k] &
240			   BITMAP_LAST_WORD_MASK(bits));
241	return result != 0;
242}
243EXPORT_SYMBOL(__bitmap_and);
244
245void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
246				const unsigned long *bitmap2, unsigned int bits)
247{
248	unsigned int k;
249	unsigned int nr = BITS_TO_LONGS(bits);
250
251	for (k = 0; k < nr; k++)
252		dst[k] = bitmap1[k] | bitmap2[k];
253}
254EXPORT_SYMBOL(__bitmap_or);
255
256void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
257				const unsigned long *bitmap2, unsigned int bits)
258{
259	unsigned int k;
260	unsigned int nr = BITS_TO_LONGS(bits);
261
262	for (k = 0; k < nr; k++)
263		dst[k] = bitmap1[k] ^ bitmap2[k];
264}
265EXPORT_SYMBOL(__bitmap_xor);
266
267bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
268				const unsigned long *bitmap2, unsigned int bits)
269{
270	unsigned int k;
271	unsigned int lim = bits/BITS_PER_LONG;
272	unsigned long result = 0;
273
274	for (k = 0; k < lim; k++)
275		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
276	if (bits % BITS_PER_LONG)
277		result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
278			   BITMAP_LAST_WORD_MASK(bits));
279	return result != 0;
280}
281EXPORT_SYMBOL(__bitmap_andnot);
282
283void __bitmap_replace(unsigned long *dst,
284		      const unsigned long *old, const unsigned long *new,
285		      const unsigned long *mask, unsigned int nbits)
286{
287	unsigned int k;
288	unsigned int nr = BITS_TO_LONGS(nbits);
289
290	for (k = 0; k < nr; k++)
291		dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
292}
293EXPORT_SYMBOL(__bitmap_replace);
294
295bool __bitmap_intersects(const unsigned long *bitmap1,
296			 const unsigned long *bitmap2, unsigned int bits)
297{
298	unsigned int k, lim = bits/BITS_PER_LONG;
299	for (k = 0; k < lim; ++k)
300		if (bitmap1[k] & bitmap2[k])
301			return true;
302
303	if (bits % BITS_PER_LONG)
304		if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
305			return true;
306	return false;
307}
308EXPORT_SYMBOL(__bitmap_intersects);
309
310bool __bitmap_subset(const unsigned long *bitmap1,
311		     const unsigned long *bitmap2, unsigned int bits)
312{
313	unsigned int k, lim = bits/BITS_PER_LONG;
314	for (k = 0; k < lim; ++k)
315		if (bitmap1[k] & ~bitmap2[k])
316			return false;
317
318	if (bits % BITS_PER_LONG)
319		if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
320			return false;
321	return true;
322}
323EXPORT_SYMBOL(__bitmap_subset);
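
/*
 * Editor's illustration (not part of the upstream file): the
 * double-underscore helpers above back the bitmap_and(), bitmap_or(),
 * bitmap_subset() and bitmap_intersects() wrappers in <linux/bitmap.h>;
 * bitmap_and() also reports whether the result is non-empty.  The
 * *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_logic_demo(void)
{
	DECLARE_BITMAP(a, 100);
	DECLARE_BITMAP(b, 100);
	DECLARE_BITMAP(dst, 100);

	bitmap_zero(a, 100);
	bitmap_zero(b, 100);
	bitmap_set(a, 0, 10);			/* bits 0..9  */
	bitmap_set(b, 5, 10);			/* bits 5..14 */

	if (!bitmap_and(dst, a, b, 100))	/* dst = bits 5..9 */
		return false;

	bitmap_or(dst, dst, b, 100);		/* dst = bits 5..14 */

	return bitmap_subset(dst, b, 100) &&	/* dst is contained in b */
	       bitmap_intersects(a, dst, 100);	/* they share bits 5..9  */
}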
324
325#define BITMAP_WEIGHT(FETCH, bits)	\
326({										\
327	unsigned int __bits = (bits), idx, w = 0;				\
328										\
329	for (idx = 0; idx < __bits / BITS_PER_LONG; idx++)			\
330		w += hweight_long(FETCH);					\
331										\
332	if (__bits % BITS_PER_LONG)						\
333		w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits));	\
334										\
335	w;									\
336})
337
338unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
339{
340	return BITMAP_WEIGHT(bitmap[idx], bits);
341}
342EXPORT_SYMBOL(__bitmap_weight);
343
344unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
345				const unsigned long *bitmap2, unsigned int bits)
346{
347	return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
348}
349EXPORT_SYMBOL(__bitmap_weight_and);
350
351unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
352				const unsigned long *bitmap2, unsigned int bits)
353{
354	return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
355}
356EXPORT_SYMBOL(__bitmap_weight_andnot);
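
/*
 * Editor's illustration (not part of the upstream file): the weight
 * helpers count set bits without building an intermediate bitmap; the
 * bitmap_weight_and()/bitmap_weight_andnot() wrappers exist alongside
 * __bitmap_weight_andnot() above.  The *_demo helper name is
 * hypothetical.
 */
static bool __maybe_unused bitmap_weight_demo(void)
{
	DECLARE_BITMAP(a, 100);
	DECLARE_BITMAP(b, 100);

	bitmap_zero(a, 100);
	bitmap_zero(b, 100);
	bitmap_set(a, 0, 10);		/* bits 0..9  */
	bitmap_set(b, 5, 10);		/* bits 5..14 */

	return bitmap_weight(a, 100) == 10 &&
	       bitmap_weight_and(a, b, 100) == 5 &&	/* |a & b|  */
	       bitmap_weight_andnot(a, b, 100) == 5;	/* |a & ~b| */
}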
357
358void __bitmap_set(unsigned long *map, unsigned int start, int len)
359{
360	unsigned long *p = map + BIT_WORD(start);
361	const unsigned int size = start + len;
362	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
363	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
364
365	while (len - bits_to_set >= 0) {
366		*p |= mask_to_set;
367		len -= bits_to_set;
368		bits_to_set = BITS_PER_LONG;
369		mask_to_set = ~0UL;
370		p++;
371	}
372	if (len) {
373		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
374		*p |= mask_to_set;
375	}
376}
377EXPORT_SYMBOL(__bitmap_set);
378
379void __bitmap_clear(unsigned long *map, unsigned int start, int len)
380{
381	unsigned long *p = map + BIT_WORD(start);
382	const unsigned int size = start + len;
383	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
384	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
385
386	while (len - bits_to_clear >= 0) {
387		*p &= ~mask_to_clear;
388		len -= bits_to_clear;
389		bits_to_clear = BITS_PER_LONG;
390		mask_to_clear = ~0UL;
391		p++;
392	}
393	if (len) {
394		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
395		*p &= ~mask_to_clear;
396	}
397}
398EXPORT_SYMBOL(__bitmap_clear);
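
/*
 * Editor's illustration (not part of the upstream file): bitmap_set()
 * and bitmap_clear() from <linux/bitmap.h> typically dispatch to the
 * helpers above for ranges that are not compile-time constant and
 * byte-aligned.  The *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_set_clear_demo(void)
{
	DECLARE_BITMAP(map, 256);

	bitmap_zero(map, 256);
	bitmap_set(map, 3, 200);	/* bits 3..202, spanning several words */
	bitmap_clear(map, 64, 64);	/* punch a hole: bits 64..127 */

	return bitmap_weight(map, 256) == 200 - 64;
}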
399
400/**
401 * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
402 * @map: The address to base the search on
403 * @size: The bitmap size in bits
404 * @start: The bitnumber to start searching at
405 * @nr: The number of zeroed bits we're looking for
406 * @align_mask: Alignment mask for zero area
407 * @align_offset: Alignment offset for zero area.
408 *
409 * The @align_mask should be one less than a power of 2; the effect is that
410 * the bit offset of all zero areas this function finds plus @align_offset
 411 * is a multiple of that power of 2.
412 */
413unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
414					     unsigned long size,
415					     unsigned long start,
416					     unsigned int nr,
417					     unsigned long align_mask,
418					     unsigned long align_offset)
419{
420	unsigned long index, end, i;
421again:
422	index = find_next_zero_bit(map, size, start);
423
424	/* Align allocation */
425	index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
426
427	end = index + nr;
428	if (end > size)
429		return end;
430	i = find_next_bit(map, end, index);
431	if (i < end) {
432		start = i + 1;
433		goto again;
434	}
435	return index;
436}
437EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
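
/*
 * Editor's illustration (not part of the upstream file): claiming an
 * aligned run of clear bits, as region or ID allocators commonly do via
 * the bitmap_find_next_zero_area() wrapper (align_offset == 0).  The
 * *_demo helper name is hypothetical; on failure it returns @size.
 */
static unsigned long __maybe_unused bitmap_zero_area_demo(unsigned long *map,
							  unsigned long size)
{
	unsigned long pos;

	/* Find 8 clear bits starting on an 8-bit boundary. */
	pos = bitmap_find_next_zero_area(map, size, 0, 8, 7);
	if (pos >= size)
		return size;		/* no suitably aligned free run */

	bitmap_set(map, pos, 8);	/* claim the run */
	return pos;
}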
438
439/**
440 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
441 *	@buf: pointer to a bitmap
442 *	@pos: a bit position in @buf (0 <= @pos < @nbits)
443 *	@nbits: number of valid bit positions in @buf
444 *
445 * Map the bit at position @pos in @buf (of length @nbits) to the
446 * ordinal of which set bit it is.  If it is not set or if @pos
447 * is not a valid bit position, map to -1.
448 *
449 * If for example, just bits 4 through 7 are set in @buf, then @pos
450 * values 4 through 7 will get mapped to 0 through 3, respectively,
451 * and other @pos values will get mapped to -1.  When @pos value 7
452 * gets mapped to (returns) @ord value 3 in this example, that means
453 * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
454 *
 455 * The bit positions 0 through @nbits - 1 are valid positions in @buf.
456 */
457static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
458{
459	if (pos >= nbits || !test_bit(pos, buf))
460		return -1;
461
462	return bitmap_weight(buf, pos);
463}
464
465/**
466 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
467 *	@dst: remapped result
468 *	@src: subset to be remapped
469 *	@old: defines domain of map
470 *	@new: defines range of map
471 *	@nbits: number of bits in each of these bitmaps
472 *
473 * Let @old and @new define a mapping of bit positions, such that
474 * whatever position is held by the n-th set bit in @old is mapped
475 * to the n-th set bit in @new.  In the more general case, allowing
476 * for the possibility that the weight 'w' of @new is less than the
477 * weight of @old, map the position of the n-th set bit in @old to
478 * the position of the m-th set bit in @new, where m == n % w.
479 *
480 * If either of the @old and @new bitmaps are empty, or if @src and
481 * @dst point to the same location, then this routine copies @src
482 * to @dst.
483 *
484 * The positions of unset bits in @old are mapped to themselves
485 * (the identity map).
486 *
487 * Apply the above specified mapping to @src, placing the result in
488 * @dst, clearing any bits previously set in @dst.
489 *
 490 * For example, let's say that @old has bits 4 through 7 set, and
491 * @new has bits 12 through 15 set.  This defines the mapping of bit
492 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 493 * bit positions unchanged.  So if, say, @src comes into this routine
494 * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
495 * 13 and 15 set.
496 */
497void bitmap_remap(unsigned long *dst, const unsigned long *src,
498		const unsigned long *old, const unsigned long *new,
499		unsigned int nbits)
500{
501	unsigned int oldbit, w;
502
503	if (dst == src)		/* following doesn't handle inplace remaps */
504		return;
505	bitmap_zero(dst, nbits);
506
507	w = bitmap_weight(new, nbits);
508	for_each_set_bit(oldbit, src, nbits) {
509		int n = bitmap_pos_to_ord(old, oldbit, nbits);
510
511		if (n < 0 || w == 0)
512			set_bit(oldbit, dst);	/* identity map */
513		else
514			set_bit(find_nth_bit(new, nbits, n % w), dst);
515	}
516}
517EXPORT_SYMBOL(bitmap_remap);
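
/*
 * Editor's illustration (not part of the upstream file), reproducing the
 * worked example from the comment above: with @old = bits 4..7 and
 * @new = bits 12..15, a @src holding bits 1, 5 and 7 is remapped to
 * bits 1, 13 and 15.  The *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_remap_demo(void)
{
	DECLARE_BITMAP(old, 32);
	DECLARE_BITMAP(new, 32);
	DECLARE_BITMAP(src, 32);
	DECLARE_BITMAP(dst, 32);

	bitmap_zero(old, 32);
	bitmap_zero(new, 32);
	bitmap_zero(src, 32);
	bitmap_set(old, 4, 4);		/* domain: bits 4..7   */
	bitmap_set(new, 12, 4);		/* range:  bits 12..15 */
	__set_bit(1, src);
	__set_bit(5, src);
	__set_bit(7, src);

	bitmap_remap(dst, src, old, new, 32);

	return test_bit(1, dst) && test_bit(13, dst) && test_bit(15, dst) &&
	       bitmap_weight(dst, 32) == 3;
}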
518
519/**
520 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
521 *	@oldbit: bit position to be mapped
522 *	@old: defines domain of map
523 *	@new: defines range of map
524 *	@bits: number of bits in each of these bitmaps
525 *
526 * Let @old and @new define a mapping of bit positions, such that
527 * whatever position is held by the n-th set bit in @old is mapped
528 * to the n-th set bit in @new.  In the more general case, allowing
529 * for the possibility that the weight 'w' of @new is less than the
530 * weight of @old, map the position of the n-th set bit in @old to
531 * the position of the m-th set bit in @new, where m == n % w.
532 *
533 * The positions of unset bits in @old are mapped to themselves
534 * (the identity map).
535 *
536 * Apply the above specified mapping to bit position @oldbit, returning
537 * the new bit position.
538 *
 539 * For example, let's say that @old has bits 4 through 7 set, and
540 * @new has bits 12 through 15 set.  This defines the mapping of bit
541 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 542 * bit positions unchanged.  So if, say, @oldbit is 5, then this routine
543 * returns 13.
544 */
545int bitmap_bitremap(int oldbit, const unsigned long *old,
546				const unsigned long *new, int bits)
547{
548	int w = bitmap_weight(new, bits);
549	int n = bitmap_pos_to_ord(old, oldbit, bits);
550	if (n < 0 || w == 0)
551		return oldbit;
552	else
553		return find_nth_bit(new, bits, n % w);
554}
555EXPORT_SYMBOL(bitmap_bitremap);
556
557#ifdef CONFIG_NUMA
558/**
559 * bitmap_onto - translate one bitmap relative to another
560 *	@dst: resulting translated bitmap
561 * 	@orig: original untranslated bitmap
562 * 	@relmap: bitmap relative to which translated
563 *	@bits: number of bits in each of these bitmaps
564 *
565 * Set the n-th bit of @dst iff there exists some m such that the
566 * n-th bit of @relmap is set, the m-th bit of @orig is set, and
567 * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 568 * (If you understood the previous sentence the first time you
569 * read it, you're overqualified for your current job.)
570 *
571 * In other words, @orig is mapped onto (surjectively) @dst,
572 * using the map { <n, m> | the n-th bit of @relmap is the
573 * m-th set bit of @relmap }.
574 *
575 * Any set bits in @orig above bit number W, where W is the
 576 * weight of (number of set bits in) @relmap, are mapped nowhere.
577 * In particular, if for all bits m set in @orig, m >= W, then
578 * @dst will end up empty.  In situations where the possibility
579 * of such an empty result is not desired, one way to avoid it is
580 * to use the bitmap_fold() operator, below, to first fold the
581 * @orig bitmap over itself so that all its set bits x are in the
582 * range 0 <= x < W.  The bitmap_fold() operator does this by
583 * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
584 *
585 * Example [1] for bitmap_onto():
586 *  Let's say @relmap has bits 30-39 set, and @orig has bits
587 *  1, 3, 5, 7, 9 and 11 set.  Then on return from this routine,
588 *  @dst will have bits 31, 33, 35, 37 and 39 set.
589 *
590 *  When bit 0 is set in @orig, it means turn on the bit in
591 *  @dst corresponding to whatever is the first bit (if any)
592 *  that is turned on in @relmap.  Since bit 0 was off in the
593 *  above example, we leave off that bit (bit 30) in @dst.
594 *
595 *  When bit 1 is set in @orig (as in the above example), it
596 *  means turn on the bit in @dst corresponding to whatever
597 *  is the second bit that is turned on in @relmap.  The second
598 *  bit in @relmap that was turned on in the above example was
599 *  bit 31, so we turned on bit 31 in @dst.
600 *
601 *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
 602 *  because they were the 4th, 6th, 8th and 10th bits
603 *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
604 *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
605 *
606 *  When bit 11 is set in @orig, it means turn on the bit in
607 *  @dst corresponding to whatever is the twelfth bit that is
608 *  turned on in @relmap.  In the above example, there were
 609 *  only ten bits turned on in @relmap (30..39), so the fact that
 610 *  bit 11 was set in @orig had no effect on @dst.
611 *
612 * Example [2] for bitmap_fold() + bitmap_onto():
613 *  Let's say @relmap has these ten bits set::
614 *
615 *		40 41 42 43 45 48 53 61 74 95
616 *
617 *  (for the curious, that's 40 plus the first ten terms of the
618 *  Fibonacci sequence.)
619 *
 620 *  Further, let's say we use the following code, invoking
 621 *  bitmap_fold() then bitmap_onto(), as suggested above to
622 *  avoid the possibility of an empty @dst result::
623 *
624 *	unsigned long *tmp;	// a temporary bitmap's bits
625 *
626 *	bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
627 *	bitmap_onto(dst, tmp, relmap, bits);
628 *
629 *  Then this table shows what various values of @dst would be, for
630 *  various @orig's.  I list the zero-based positions of each set bit.
631 *  The tmp column shows the intermediate result, as computed by
632 *  using bitmap_fold() to fold the @orig bitmap modulo ten
633 *  (the weight of @relmap):
634 *
635 *      =============== ============== =================
636 *      @orig           tmp            @dst
637 *      0                0             40
638 *      1                1             41
639 *      9                9             95
640 *      10               0             40 [#f1]_
641 *      1 3 5 7          1 3 5 7       41 43 48 61
642 *      0 1 2 3 4        0 1 2 3 4     40 41 42 43 45
643 *      0 9 18 27        0 9 8 7       40 61 74 95
644 *      0 10 20 30       0             40
645 *      0 11 22 33       0 1 2 3       40 41 42 43
646 *      0 12 24 36       0 2 4 6       40 42 45 53
647 *      78 102 211       1 2 8         41 42 74 [#f1]_
648 *      =============== ============== =================
649 *
650 * .. [#f1]
651 *
652 *     For these marked lines, if we hadn't first done bitmap_fold()
653 *     into tmp, then the @dst result would have been empty.
654 *
655 * If either of @orig or @relmap is empty (no set bits), then @dst
656 * will be returned empty.
657 *
658 * If (as explained above) the only set bits in @orig are in positions
659 * m where m >= W, (where W is the weight of @relmap) then @dst will
660 * once again be returned empty.
661 *
662 * All bits in @dst not set by the above rule are cleared.
663 */
664void bitmap_onto(unsigned long *dst, const unsigned long *orig,
665			const unsigned long *relmap, unsigned int bits)
666{
667	unsigned int n, m;	/* same meaning as in above comment */
668
669	if (dst == orig)	/* following doesn't handle inplace mappings */
670		return;
671	bitmap_zero(dst, bits);
672
673	/*
674	 * The following code is a more efficient, but less
675	 * obvious, equivalent to the loop:
676	 *	for (m = 0; m < bitmap_weight(relmap, bits); m++) {
677	 *		n = find_nth_bit(orig, bits, m);
678	 *		if (test_bit(m, orig))
679	 *			set_bit(n, dst);
680	 *	}
681	 */
682
683	m = 0;
684	for_each_set_bit(n, relmap, bits) {
685		/* m == bitmap_pos_to_ord(relmap, n, bits) */
686		if (test_bit(m, orig))
687			set_bit(n, dst);
688		m++;
689	}
690}
691
692/**
693 * bitmap_fold - fold larger bitmap into smaller, modulo specified size
694 *	@dst: resulting smaller bitmap
695 *	@orig: original larger bitmap
696 *	@sz: specified size
697 *	@nbits: number of bits in each of these bitmaps
698 *
699 * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
700 * Clear all other bits in @dst.  See further the comment and
701 * Example [2] for bitmap_onto() for why and how to use this.
702 */
703void bitmap_fold(unsigned long *dst, const unsigned long *orig,
704			unsigned int sz, unsigned int nbits)
705{
706	unsigned int oldbit;
707
708	if (dst == orig)	/* following doesn't handle inplace mappings */
709		return;
710	bitmap_zero(dst, nbits);
711
712	for_each_set_bit(oldbit, orig, nbits)
713		set_bit(oldbit % sz, dst);
714}
715#endif /* CONFIG_NUMA */
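
#ifdef CONFIG_NUMA
/*
 * Editor's illustration (not part of the upstream file), following
 * Example [2] above: fold @orig modulo the weight of @relmap before
 * calling bitmap_onto() so the result cannot come back empty.  Guarded
 * by CONFIG_NUMA like the two helpers themselves; the *_demo helper
 * name is hypothetical.
 */
static void __maybe_unused bitmap_fold_onto_demo(unsigned long *dst,
						 unsigned long *tmp,
						 const unsigned long *orig,
						 const unsigned long *relmap,
						 unsigned int bits)
{
	unsigned int w = bitmap_weight(relmap, bits);

	if (!w) {			/* empty @relmap: nothing to map onto */
		bitmap_zero(dst, bits);
		return;
	}

	bitmap_fold(tmp, orig, w, bits);	/* every set bit is now < w */
	bitmap_onto(dst, tmp, relmap, bits);	/* project onto @relmap     */
}
#endif /* CONFIG_NUMA */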
716
717unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
718{
719	return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
720			     flags);
721}
722EXPORT_SYMBOL(bitmap_alloc);
723
724unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
725{
726	return bitmap_alloc(nbits, flags | __GFP_ZERO);
727}
728EXPORT_SYMBOL(bitmap_zalloc);
729
730unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
731{
732	return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
733				  flags, node);
734}
735EXPORT_SYMBOL(bitmap_alloc_node);
736
737unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
738{
739	return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
740}
741EXPORT_SYMBOL(bitmap_zalloc_node);
742
743void bitmap_free(const unsigned long *bitmap)
744{
745	kfree(bitmap);
746}
747EXPORT_SYMBOL(bitmap_free);
748
749static void devm_bitmap_free(void *data)
750{
751	unsigned long *bitmap = data;
752
753	bitmap_free(bitmap);
754}
755
756unsigned long *devm_bitmap_alloc(struct device *dev,
757				 unsigned int nbits, gfp_t flags)
758{
759	unsigned long *bitmap;
760	int ret;
761
762	bitmap = bitmap_alloc(nbits, flags);
763	if (!bitmap)
764		return NULL;
765
766	ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
767	if (ret)
768		return NULL;
769
770	return bitmap;
771}
772EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
773
774unsigned long *devm_bitmap_zalloc(struct device *dev,
775				  unsigned int nbits, gfp_t flags)
776{
777	return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
778}
779EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
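
/*
 * Editor's illustration (not part of the upstream file): a plain
 * bitmap_zalloc() must be paired with bitmap_free(), while the devm_
 * variants above tie the lifetime to a struct device.  The *_demo
 * helper name is hypothetical.
 */
static int __maybe_unused bitmap_alloc_demo(unsigned int nbits)
{
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;

	bitmap_set(map, 0, nbits / 2);	/* use the bitmap ... */

	bitmap_free(map);
	return 0;
}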
780
781#if BITS_PER_LONG == 64
782/**
783 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
784 *	@bitmap: array of unsigned longs, the destination bitmap
785 *	@buf: array of u32 (in host byte order), the source bitmap
786 *	@nbits: number of bits in @bitmap
787 */
788void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
789{
790	unsigned int i, halfwords;
791
792	halfwords = DIV_ROUND_UP(nbits, 32);
793	for (i = 0; i < halfwords; i++) {
794		bitmap[i/2] = (unsigned long) buf[i];
795		if (++i < halfwords)
796			bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
797	}
798
799	/* Clear tail bits in last word beyond nbits. */
800	if (nbits % BITS_PER_LONG)
801		bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
802}
803EXPORT_SYMBOL(bitmap_from_arr32);
804
805/**
806 * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
807 *	@buf: array of u32 (in host byte order), the dest bitmap
808 *	@bitmap: array of unsigned longs, the source bitmap
809 *	@nbits: number of bits in @bitmap
810 */
811void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
812{
813	unsigned int i, halfwords;
814
815	halfwords = DIV_ROUND_UP(nbits, 32);
816	for (i = 0; i < halfwords; i++) {
817		buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
818		if (++i < halfwords)
819			buf[i] = (u32) (bitmap[i/2] >> 32);
820	}
821
822	/* Clear tail bits in last element of array beyond nbits. */
823	if (nbits % BITS_PER_LONG)
824		buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
825}
826EXPORT_SYMBOL(bitmap_to_arr32);
827#endif
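
/*
 * Editor's illustration (not part of the upstream file):
 * bitmap_from_arr32()/bitmap_to_arr32() are the portable way to exchange
 * bitmaps with fixed-width u32 arrays regardless of BITS_PER_LONG; on
 * 32-bit kernels they reduce to plain copies.  The *_demo helper name is
 * hypothetical.
 */
static bool __maybe_unused bitmap_arr32_demo(void)
{
	u32 words[2] = { 0x80000001, 0x1 };	/* bits 0, 31 and 32 */
	u32 out[2];
	DECLARE_BITMAP(map, 33);

	bitmap_from_arr32(map, words, 33);
	bitmap_to_arr32(out, map, 33);

	return test_bit(32, map) && out[0] == words[0] && out[1] == words[1];
}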
828
829#if BITS_PER_LONG == 32
830/**
831 * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
832 *	@bitmap: array of unsigned longs, the destination bitmap
833 *	@buf: array of u64 (in host byte order), the source bitmap
834 *	@nbits: number of bits in @bitmap
835 */
836void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
837{
838	int n;
839
840	for (n = nbits; n > 0; n -= 64) {
841		u64 val = *buf++;
842
843		*bitmap++ = val;
844		if (n > 32)
845			*bitmap++ = val >> 32;
846	}
847
848	/*
849	 * Clear tail bits in the last word beyond nbits.
850	 *
851	 * Negative index is OK because here we point to the word next
852	 * to the last word of the bitmap, except for nbits == 0, which
853	 * is tested implicitly.
854	 */
855	if (nbits % BITS_PER_LONG)
856		bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
857}
858EXPORT_SYMBOL(bitmap_from_arr64);
859
860/**
861 * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
862 *	@buf: array of u64 (in host byte order), the dest bitmap
863 *	@bitmap: array of unsigned longs, the source bitmap
864 *	@nbits: number of bits in @bitmap
865 */
866void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
867{
868	const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
869
870	while (bitmap < end) {
871		*buf = *bitmap++;
872		if (bitmap < end)
873			*buf |= (u64)(*bitmap++) << 32;
874		buf++;
875	}
876
877	/* Clear tail bits in the last element of array beyond nbits. */
878	if (nbits % 64)
879		buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
880}
881EXPORT_SYMBOL(bitmap_to_arr64);
882#endif
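
/*
 * Editor's illustration (not part of the upstream file): the u64
 * variants serve the same purpose for 64-bit wide values, and are the
 * safe way to move a u64 in and out of a bitmap on 32-bit kernels.  The
 * *_demo helper name is hypothetical.
 */
static bool __maybe_unused bitmap_arr64_demo(u64 val)
{
	DECLARE_BITMAP(map, 64);
	u64 out;

	bitmap_from_arr64(map, &val, 64);
	bitmap_to_arr64(&out, map, 64);

	return out == val;
}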