lib/sort.c as of v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
  4 *
  5 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
  6 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
  7 *
  8 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
  9 * better) at the expense of stack usage and much larger code to avoid
 10 * quicksort's O(n^2) worst case.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/types.h>
 16#include <linux/export.h>
 17#include <linux/sort.h>
 18
 19/**
 20 * is_aligned - is this pointer & size okay for word-wide copying?
 21 * @base: pointer to data
 22 * @size: size of each element
 23 * @align: required alignment (typically 4 or 8)
 24 *
 25 * Returns true if elements can be copied using word loads and stores.
 26 * The size must be a multiple of the alignment, and the base address must
 27 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 28 *
 29 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 30 * to "if ((a | b) & mask)", so we do that by hand.
 31 */
 32__attribute_const__ __always_inline
 33static bool is_aligned(const void *base, size_t size, unsigned char align)
 34{
 35	unsigned char lsbits = (unsigned char)size;
 36
 37	(void)base;
 38#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 39	lsbits |= (unsigned char)(uintptr_t)base;
 40#endif
 41	return (lsbits & (align - 1)) == 0;
 42}
 43
 44/**
 45 * swap_words_32 - swap two elements in 32-bit chunks
 46 * @a: pointer to the first element to swap
 47 * @b: pointer to the second element to swap
 48 * @n: element size (must be a multiple of 4)
 49 *
 50 * Exchange the two objects in memory.  This exploits base+index addressing,
 51 * which basically all CPUs have, to minimize loop overhead computations.
 52 *
 53 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 54 * bottom of the loop, even though the zero flag is still valid from the
 55 * subtract (since the intervening mov instructions don't alter the flags).
 56 * Gcc 8.1.0 doesn't have that problem.
 57 */
 58static void swap_words_32(void *a, void *b, size_t n)
 59{
 60	do {
 61		u32 t = *(u32 *)(a + (n -= 4));
 62		*(u32 *)(a + n) = *(u32 *)(b + n);
 63		*(u32 *)(b + n) = t;
 64	} while (n);
 65}
 66
 67/**
 68 * swap_words_64 - swap two elements in 64-bit chunks
 69 * @a: pointer to the first element to swap
 70 * @b: pointer to the second element to swap
 71 * @n: element size (must be a multiple of 8)
 72 *
 73 * Exchange the two objects in memory.  This exploits base+index
 74 * addressing, which basically all CPUs have, to minimize loop overhead
 75 * computations.
 76 *
 77 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 78 * one requires base+index+4 addressing which x86 has but most other
 79 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 80 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 81 * x32 ABI).  Are there any cases the kernel needs to worry about?
 82 */
 83static void swap_words_64(void *a, void *b, size_t n)
 84{
 85	do {
 86#ifdef CONFIG_64BIT
 87		u64 t = *(u64 *)(a + (n -= 8));
 88		*(u64 *)(a + n) = *(u64 *)(b + n);
 89		*(u64 *)(b + n) = t;
 90#else
 91		/* Use two 32-bit transfers to avoid base+index+4 addressing */
 92		u32 t = *(u32 *)(a + (n -= 4));
 93		*(u32 *)(a + n) = *(u32 *)(b + n);
 94		*(u32 *)(b + n) = t;
 95
 96		t = *(u32 *)(a + (n -= 4));
 97		*(u32 *)(a + n) = *(u32 *)(b + n);
 98		*(u32 *)(b + n) = t;
 99#endif
100	} while (n);
101}
102
103/**
104 * swap_bytes - swap two elements a byte at a time
105 * @a: pointer to the first element to swap
106 * @b: pointer to the second element to swap
107 * @n: element size
108 *
109 * This is the fallback if alignment doesn't allow using larger chunks.
110 */
111static void swap_bytes(void *a, void *b, size_t n)
112{
113	do {
114		char t = ((char *)a)[--n];
115		((char *)a)[n] = ((char *)b)[n];
116		((char *)b)[n] = t;
117	} while (n);
118}
119
120/*
121 * The values are arbitrary as long as they can't be confused with
122 * a pointer, but small integers make for the smallest compare
123 * instructions.
124 */
125#define SWAP_WORDS_64 (swap_func_t)0
126#define SWAP_WORDS_32 (swap_func_t)1
127#define SWAP_BYTES    (swap_func_t)2
128
129/*
130 * The function pointer is last to make tail calls most efficient if the
131 * compiler decides not to inline this function.
132 */
133static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
134{
135	if (swap_func == SWAP_WORDS_64)
136		swap_words_64(a, b, size);
137	else if (swap_func == SWAP_WORDS_32)
138		swap_words_32(a, b, size);
139	else if (swap_func == SWAP_BYTES)
140		swap_bytes(a, b, size);
141	else
142		swap_func(a, b, (int)size);
143}
144
145#define _CMP_WRAPPER ((cmp_r_func_t)0L)
146
147static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
148{
149	if (cmp == _CMP_WRAPPER)
150		return ((cmp_func_t)(priv))(a, b);
151	return cmp(a, b, priv);
152}
153
154/**
155 * parent - given the offset of the child, find the offset of the parent.
156 * @i: the offset of the heap element whose parent is sought.  Non-zero.
157 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
158 * @size: size of each element
159 *
160 * In terms of array indexes, the parent of element j = @i/@size is simply
161 * (j-1)/2.  But when working in byte offsets, we can't use implicit
162 * truncation of integer divides.
163 *
164 * Fortunately, we only need one bit of the quotient, not the full divide.
165 * @size has a least significant bit.  That bit will be clear if @i is
166 * an even multiple of @size, and set if it's an odd multiple.
167 *
168 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
169 * branch is unpredictable, it's done with a bit of clever branch-free
170 * code instead.
171 */
172__attribute_const__ __always_inline
173static size_t parent(size_t i, unsigned int lsbit, size_t size)
174{
175	i -= size;
176	i -= size & -(i & lsbit);
177	return i / 2;
178}
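To make the byte-offset arithmetic concrete, the userspace sketch below (not from the kernel tree) mirrors parent() and checks it against the plain index formula ((j-1)/2)*size; parent_demo() and the element sizes tested are illustrative only.

/* Userspace mirror of parent(), checked against ((j - 1) / 2) * size. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t parent_demo(size_t i, unsigned int lsbit, size_t size)
{
        i -= size;
        i -= size & -(i & lsbit);       /* if i is now an odd multiple of size, drop one more size */
        return i / 2;
}

int main(void)
{
        static const size_t sizes[] = { 4, 8, 12, 24 };  /* arbitrary element sizes */

        for (size_t s = 0; s < sizeof(sizes) / sizeof(sizes[0]); s++) {
                size_t size = sizes[s];
                unsigned int lsbit = size & -size;

                for (size_t j = 1; j < 100; j++)        /* child index, non-zero */
                        assert(parent_demo(j * size, lsbit, size) ==
                               ((j - 1) / 2) * size);
        }
        printf("parent() matches (j-1)/2 for all tested offsets\n");
        return 0;
}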
179
180/**
181 * sort_r - sort an array of elements
182 * @base: pointer to data to sort
183 * @num: number of elements
184 * @size: size of each element
185 * @cmp_func: pointer to comparison function
186 * @swap_func: pointer to swap function or NULL
187 * @priv: third argument passed to comparison function
188 *
189 * This function does a heapsort on the given array.  You may provide
190 * a swap_func function if you need to do something more than a memory
191 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
192 * avoids a slow retpoline and so is significantly faster.
193 *
194 * Sorting time is O(n log n) both on average and worst-case. While
195 * quicksort is slightly faster on average, it suffers from exploitable
196 * O(n*n) worst-case behavior and extra memory requirements that make
197 * it less suitable for kernel use.
198 */
199void sort_r(void *base, size_t num, size_t size,
200	    cmp_r_func_t cmp_func,
201	    swap_func_t swap_func,
202	    const void *priv)
203{
204	/* pre-scale counters for performance */
205	size_t n = num * size, a = (num/2) * size;
206	const unsigned int lsbit = size & -size;  /* Used to find parent */
207
208	if (!a)		/* num < 2 || size == 0 */
209		return;
210
211	if (!swap_func) {
212		if (is_aligned(base, size, 8))
213			swap_func = SWAP_WORDS_64;
214		else if (is_aligned(base, size, 4))
215			swap_func = SWAP_WORDS_32;
216		else
217			swap_func = SWAP_BYTES;
218	}
219
220	/*
221	 * Loop invariants:
222	 * 1. elements [a,n) satisfy the heap property (compare greater than
223	 *    all of their children),
224	 * 2. elements [n,num*size) are sorted, and
225	 * 3. a <= b <= c <= d <= n (whenever they are valid).
226	 */
227	for (;;) {
228		size_t b, c, d;
229
230		if (a)			/* Building heap: sift down --a */
231			a -= size;
232		else if (n -= size)	/* Sorting: Extract root to --n */
233			do_swap(base, base + n, size, swap_func);
234		else			/* Sort complete */
235			break;
236
237		/*
238		 * Sift element at "a" down into heap.  This is the
239		 * "bottom-up" variant, which significantly reduces
240		 * calls to cmp_func(): we find the sift-down path all
241		 * the way to the leaves (one compare per level), then
242		 * backtrack to find where to insert the target element.
243		 *
244		 * Because elements tend to sift down close to the leaves,
245		 * this uses fewer compares than doing two per level
246		 * on the way down.  (A bit more than half as many on
247		 * average, 3/4 worst-case.)
248		 */
249		for (b = a; c = 2*b + size, (d = c + size) < n;)
250			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
251		if (d == n)	/* Special case last leaf with no sibling */
252			b = c;
253
254		/* Now backtrack from "b" to the correct location for "a" */
255		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
256			b = parent(b, lsbit, size);
257		c = b;			/* Where "a" belongs */
258		while (b != a) {	/* Shift it into place */
259			b = parent(b, lsbit, size);
260			do_swap(base + b, base + c, size, swap_func);
261		}
262	}
263}
264EXPORT_SYMBOL(sort_r);
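A hedged usage sketch, not from the kernel tree: sort an array of indices by keys stored in a separate table, passing the table through @priv so the comparator stays reentrant. struct key_table, cmp_idx() and sort_idx_by_key() are invented names; swap_func is left NULL so the fast built-in swap is used.

/* Example caller (hypothetical): order idx[] by the keys in key[]. */
#include <linux/sort.h>
#include <linux/types.h>

struct key_table {
        const u32 *key;
};

static int cmp_idx(const void *a, const void *b, const void *priv)
{
        const struct key_table *t = priv;
        u32 ka = t->key[*(const u32 *)a];
        u32 kb = t->key[*(const u32 *)b];

        if (ka < kb)
                return -1;
        return ka > kb;         /* 1 if greater, 0 if equal */
}

static void sort_idx_by_key(u32 *idx, size_t nr, const u32 *key)
{
        struct key_table t = { .key = key };

        /* NULL swap_func: 4-byte elements get the built-in 32-bit word swap */
        sort_r(idx, nr, sizeof(*idx), cmp_idx, NULL, &t);
}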
265
266void sort(void *base, size_t num, size_t size,
267	  cmp_func_t cmp_func,
268	  swap_func_t swap_func)
269{
270	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
271}
272EXPORT_SYMBOL(sort);
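For completeness, a minimal sketch (again not part of this file) of the non-reentrant entry point: sorting a plain integer array with sort(). cmp_int() and example() are invented names, and the NULL swap_func selects the built-in word-wide swap.

/* Example caller (hypothetical): sort an int array ascending. */
#include <linux/kernel.h>
#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a, y = *(const int *)b;

        if (x < y)
                return -1;
        return x > y;
}

static void example(void)
{
        int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

        sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
        /* v is now { 1, 1, 2, 3, 4, 5, 6, 9 } */
}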
lib/sort.c as of v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
  4 *
  5 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
  6 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
  7 *
  8 * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n
  9 * better) at the expense of stack usage and much larger code to avoid
 10 * quicksort's O(n^2) worst case.
 11 */
 12
 13#include <linux/types.h>
 14#include <linux/export.h>
 15#include <linux/sort.h>
 16
 17/**
 18 * is_aligned - is this pointer & size okay for word-wide copying?
 19 * @base: pointer to data
 20 * @size: size of each element
 21 * @align: required alignment (typically 4 or 8)
 22 *
 23 * Returns true if elements can be copied using word loads and stores.
 24 * The size must be a multiple of the alignment, and the base address must
 25 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 26 *
 27 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 28 * to "if ((a | b) & mask)", so we do that by hand.
 29 */
 30__attribute_const__ __always_inline
 31static bool is_aligned(const void *base, size_t size, unsigned char align)
 32{
 33	unsigned char lsbits = (unsigned char)size;
 34
 35	(void)base;
 36#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 37	lsbits |= (unsigned char)(uintptr_t)base;
 38#endif
 39	return (lsbits & (align - 1)) == 0;
 40}
 41
 42/**
 43 * swap_words_32 - swap two elements in 32-bit chunks
 44 * @a: pointer to the first element to swap
 45 * @b: pointer to the second element to swap
 46 * @n: element size (must be a multiple of 4)
 47 *
 48 * Exchange the two objects in memory.  This exploits base+index addressing,
 49 * which basically all CPUs have, to minimize loop overhead computations.
 50 *
 51 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 52 * bottom of the loop, even though the zero flag is still valid from the
 53 * subtract (since the intervening mov instructions don't alter the flags).
 54 * Gcc 8.1.0 doesn't have that problem.
 55 */
 56static void swap_words_32(void *a, void *b, size_t n)
 57{
 58	do {
 59		u32 t = *(u32 *)(a + (n -= 4));
 60		*(u32 *)(a + n) = *(u32 *)(b + n);
 61		*(u32 *)(b + n) = t;
 62	} while (n);
 63}
 64
 65/**
 66 * swap_words_64 - swap two elements in 64-bit chunks
 67 * @a: pointer to the first element to swap
 68 * @b: pointer to the second element to swap
 69 * @n: element size (must be a multiple of 8)
 70 *
 71 * Exchange the two objects in memory.  This exploits base+index
 72 * addressing, which basically all CPUs have, to minimize loop overhead
 73 * computations.
 74 *
 75 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 76 * one requires base+index+4 addressing which x86 has but most other
 77 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 78 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 79 * x32 ABI).  Are there any cases the kernel needs to worry about?
 80 */
 81static void swap_words_64(void *a, void *b, size_t n)
 82{
 83	do {
 84#ifdef CONFIG_64BIT
 85		u64 t = *(u64 *)(a + (n -= 8));
 86		*(u64 *)(a + n) = *(u64 *)(b + n);
 87		*(u64 *)(b + n) = t;
 88#else
 89		/* Use two 32-bit transfers to avoid base+index+4 addressing */
 90		u32 t = *(u32 *)(a + (n -= 4));
 91		*(u32 *)(a + n) = *(u32 *)(b + n);
 92		*(u32 *)(b + n) = t;
 93
 94		t = *(u32 *)(a + (n -= 4));
 95		*(u32 *)(a + n) = *(u32 *)(b + n);
 96		*(u32 *)(b + n) = t;
 97#endif
 98	} while (n);
 99}
100
101/**
102 * swap_bytes - swap two elements a byte at a time
103 * @a: pointer to the first element to swap
104 * @b: pointer to the second element to swap
105 * @n: element size
106 *
107 * This is the fallback if alignment doesn't allow using larger chunks.
108 */
109static void swap_bytes(void *a, void *b, size_t n)
110{
111	do {
112		char t = ((char *)a)[--n];
113		((char *)a)[n] = ((char *)b)[n];
114		((char *)b)[n] = t;
115	} while (n);
116}
117
118/*
119 * The values are arbitrary as long as they can't be confused with
120 * a pointer, but small integers make for the smallest compare
121 * instructions.
122 */
123#define SWAP_WORDS_64 (swap_r_func_t)0
124#define SWAP_WORDS_32 (swap_r_func_t)1
125#define SWAP_BYTES    (swap_r_func_t)2
126#define SWAP_WRAPPER  (swap_r_func_t)3
127
128struct wrapper {
129	cmp_func_t cmp;
130	swap_func_t swap;
131};
132
133/*
134 * The function pointer is last to make tail calls most efficient if the
135 * compiler decides not to inline this function.
136 */
137static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
138{
139	if (swap_func == SWAP_WRAPPER) {
140		((const struct wrapper *)priv)->swap(a, b, (int)size);
141		return;
142	}
143
144	if (swap_func == SWAP_WORDS_64)
145		swap_words_64(a, b, size);
146	else if (swap_func == SWAP_WORDS_32)
147		swap_words_32(a, b, size);
148	else if (swap_func == SWAP_BYTES)
149		swap_bytes(a, b, size);
150	else
151		swap_func(a, b, (int)size, priv);
152}
153
154#define _CMP_WRAPPER ((cmp_r_func_t)0L)
155
156static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
157{
158	if (cmp == _CMP_WRAPPER)
159		return ((const struct wrapper *)priv)->cmp(a, b);
160	return cmp(a, b, priv);
161}
162
163/**
164 * parent - given the offset of the child, find the offset of the parent.
165 * @i: the offset of the heap element whose parent is sought.  Non-zero.
166 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
167 * @size: size of each element
168 *
169 * In terms of array indexes, the parent of element j = @i/@size is simply
170 * (j-1)/2.  But when working in byte offsets, we can't use implicit
171 * truncation of integer divides.
172 *
173 * Fortunately, we only need one bit of the quotient, not the full divide.
174 * @size has a least significant bit.  That bit will be clear if @i is
175 * an even multiple of @size, and set if it's an odd multiple.
176 *
177 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
178 * branch is unpredictable, it's done with a bit of clever branch-free
179 * code instead.
180 */
181__attribute_const__ __always_inline
182static size_t parent(size_t i, unsigned int lsbit, size_t size)
183{
184	i -= size;
185	i -= size & -(i & lsbit);
186	return i / 2;
187}
188
189/**
190 * sort_r - sort an array of elements
191 * @base: pointer to data to sort
192 * @num: number of elements
193 * @size: size of each element
194 * @cmp_func: pointer to comparison function
195 * @swap_func: pointer to swap function or NULL
196 * @priv: third argument passed to comparison function
197 *
198 * This function does a heapsort on the given array.  You may provide
199 * a swap_func function if you need to do something more than a memory
200 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
201 * avoids a slow retpoline and so is significantly faster.
202 *
203 * Sorting time is O(n log n) both on average and worst-case. While
204 * quicksort is slightly faster on average, it suffers from exploitable
205 * O(n*n) worst-case behavior and extra memory requirements that make
206 * it less suitable for kernel use.
207 */
208void sort_r(void *base, size_t num, size_t size,
209	    cmp_r_func_t cmp_func,
210	    swap_r_func_t swap_func,
211	    const void *priv)
212{
213	/* pre-scale counters for performance */
214	size_t n = num * size, a = (num/2) * size;
215	const unsigned int lsbit = size & -size;  /* Used to find parent */
216	size_t shift = 0;
217
218	if (!a)		/* num < 2 || size == 0 */
219		return;
220
221	/* called from 'sort' without swap function, let's pick the default */
222	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
223		swap_func = NULL;
224
225	if (!swap_func) {
226		if (is_aligned(base, size, 8))
227			swap_func = SWAP_WORDS_64;
228		else if (is_aligned(base, size, 4))
229			swap_func = SWAP_WORDS_32;
230		else
231			swap_func = SWAP_BYTES;
232	}
233
234	/*
235	 * Loop invariants:
236	 * 1. elements [a,n) satisfy the heap property (compare greater than
237	 *    all of their children),
238	 * 2. elements [n,num*size) are sorted, and
239	 * 3. a <= b <= c <= d <= n (whenever they are valid).
240	 */
241	for (;;) {
242		size_t b, c, d;
243
244		if (a)			/* Building heap: sift down a */
245			a -= size << shift;
246		else if (n > 3 * size) { /* Sorting: Extract two largest elements */
247			n -= size;
248			do_swap(base, base + n, size, swap_func, priv);
249			shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
250			a = size << shift;
251			n -= size;
252			do_swap(base + a, base + n, size, swap_func, priv);
253		} else {		/* Sort complete */
254			break;
255		}
256
257		/*
258		 * Sift element at "a" down into heap.  This is the
259		 * "bottom-up" variant, which significantly reduces
260		 * calls to cmp_func(): we find the sift-down path all
261		 * the way to the leaves (one compare per level), then
262		 * backtrack to find where to insert the target element.
263		 *
264		 * Because elements tend to sift down close to the leaves,
265		 * this uses fewer compares than doing two per level
266		 * on the way down.  (A bit more than half as many on
267		 * average, 3/4 worst-case.)
268		 */
269		for (b = a; c = 2*b + size, (d = c + size) < n;)
270			b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
271		if (d == n)	/* Special case last leaf with no sibling */
272			b = c;
273
274		/* Now backtrack from "b" to the correct location for "a" */
275		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
276			b = parent(b, lsbit, size);
277		c = b;			/* Where "a" belongs */
278		while (b != a) {	/* Shift it into place */
279			b = parent(b, lsbit, size);
280			do_swap(base + b, base + c, size, swap_func, priv);
281		}
282	}
283
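        /*
         * The loop above exits with two or three elements left unsorted
         * (n == 2*size or n == 3*size).  Move the heap root, the largest
         * of them, to the end; if three remained, the first two may still
         * be out of order, so compare and swap them as needed.
         */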
284	n -= size;
285	do_swap(base, base + n, size, swap_func, priv);
286	if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
287		do_swap(base, base + size, size, swap_func, priv);
288}
289EXPORT_SYMBOL(sort_r);
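Because the swap callback is now reentrant, a caller can keep a companion array in step with the sorted one by routing extra state through @priv, something the v5.14.15 swap_func_t could not do without globals. A hedged sketch with invented names (struct pair_ctx, cmp_key(), swap_pair(), sort_pairs()):

/* Example caller (hypothetical): sort keys[] while permuting vals[] identically. */
#include <linux/sort.h>
#include <linux/types.h>

struct pair_ctx {
        u32 *keys;
        u64 *vals;
};

static int cmp_key(const void *a, const void *b, const void *priv)
{
        u32 ka = *(const u32 *)a, kb = *(const u32 *)b;

        if (ka < kb)
                return -1;
        return ka > kb;
}

static void swap_pair(void *a, void *b, int size, const void *priv)
{
        const struct pair_ctx *ctx = priv;
        size_t i = (u32 *)a - ctx->keys;        /* element index of @a */
        size_t j = (u32 *)b - ctx->keys;        /* element index of @b */
        u32 tk = *(u32 *)a;
        u64 tv = ctx->vals[i];

        /* swap the keys themselves */
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = tk;

        /* keep the companion entries in step */
        ctx->vals[i] = ctx->vals[j];
        ctx->vals[j] = tv;
}

static void sort_pairs(struct pair_ctx *ctx, size_t nr)
{
        sort_r(ctx->keys, nr, sizeof(*ctx->keys), cmp_key, swap_pair, ctx);
}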
290
291void sort(void *base, size_t num, size_t size,
292	  cmp_func_t cmp_func,
293	  swap_func_t swap_func)
294{
295	struct wrapper w = {
296		.cmp  = cmp_func,
297		.swap = swap_func,
298	};
299
300	return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
301}
302EXPORT_SYMBOL(sort);