/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/slab.h>

static void u32_swap(void *a, void *b, int size)
{
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
        char t;

        do {
                t = *(char *)a;
                *(char *)a++ = *(char *)b;
                *(char *)b++ = t;
        } while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
          int (*cmp_func)(const void *, const void *),
          void (*swap_func)(void *, void *, int size))
{
        /* pre-scale counters for performance */
        int i = (num/2 - 1) * size, n = num * size, c, r;

        if (!swap_func)
                swap_func = (size == 4 ? u32_swap : generic_swap);

        /* heapify */
        for ( ; i >= 0; i -= size) {
                for (r = i; r * 2 + size < n; r = c) {
                        c = r * 2 + size;
                        if (c < n - size &&
                            cmp_func(base + c, base + c + size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }

        /* sort */
        for (i = n - size; i > 0; i -= size) {
                swap_func(base, base + i, size);
                for (r = 0; r * 2 + size < i; r = c) {
                        c = r * 2 + size;
                        if (c < i - size &&
                            cmp_func(base + c, base + c + size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }
}
EXPORT_SYMBOL(sort);
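
/*
 * Example (a sketch, not part of the original file): the kernel-doc
 * above suggests passing a swap_func optimized to your element type.
 * For 8-byte elements, a caller might supply something like this
 * hypothetical helper (cmp_u64 is likewise hypothetical):
 */
#if 0
static void u64_swap(void *a, void *b, int size)
{
        u64 t = *(u64 *)a;
        *(u64 *)a = *(u64 *)b;
        *(u64 *)b = t;
}

/* sort(array, n, sizeof(u64), cmp_u64, u64_swap); */
#endif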

#if 0
/* a simple boot-time regression test */

int cmpint(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

static int sort_test(void)
{
        int *a, i, r = 1;

        a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
        BUG_ON(!a);

        printk("testing sort()\n");

        for (i = 0; i < 1000; i++) {
                r = (r * 725861) % 6599;
                a[i] = r;
        }

        sort(a, 1000, sizeof(int), cmpint, NULL);

        for (i = 0; i < 999; i++)
                if (a[i] > a[i+1]) {
                        printk("sort() failed!\n");
                        break;
                }

        kfree(a);

        return 0;
}

module_init(sort_test);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be too, unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
        unsigned char lsbits = (unsigned char)size;

        (void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        lsbits |= (unsigned char)(uintptr_t)base;
#endif
        return (lsbits & (align - 1)) == 0;
}
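
/*
 * Illustration (a sketch, not part of the original file): the naive
 * form of the test above, which gcc reportedly compiles as two separate
 * tests rather than fusing them as is_aligned() does by hand:
 */
#if 0
static bool is_aligned_naive(const void *base, size_t size, unsigned char align)
{
        /* two masked tests instead of one OR-then-mask */
        return !((uintptr_t)base & (align - 1)) && !(size & (align - 1));
}
#endif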

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
        do {
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
        } while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not. If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
        do {
#ifdef CONFIG_64BIT
                u64 t = *(u64 *)(a + (n -= 8));
                *(u64 *)(a + n) = *(u64 *)(b + n);
                *(u64 *)(b + n) = t;
#else
                /* Use two 32-bit transfers to avoid base+index+4 addressing */
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;

                t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
#endif
        } while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
        do {
                char t = ((char *)a)[--n];
                ((char *)a)[n] = ((char *)b)[n];
                ((char *)b)[n] = t;
        } while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
        cmp_func_t cmp;
        swap_func_t swap;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
        if (swap_func == SWAP_WRAPPER) {
                ((const struct wrapper *)priv)->swap(a, b, (int)size);
                return;
        }

        if (swap_func == SWAP_WORDS_64)
                swap_words_64(a, b, size);
        else if (swap_func == SWAP_WORDS_32)
                swap_words_32(a, b, size);
        else if (swap_func == SWAP_BYTES)
                swap_bytes(a, b, size);
        else
                swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
        if (cmp == _CMP_WRAPPER)
                return ((const struct wrapper *)priv)->cmp(a, b);
        return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
        i -= size;
        i -= size & -(i & lsbit);
        return i / 2;
}
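
/*
 * Sanity sketch (not part of the original file): on byte offsets,
 * parent() should agree with the index formula (j-1)/2. A hypothetical
 * check in the spirit of the old #if 0 regression test; assumes
 * <linux/bug.h> for BUG_ON():
 */
#if 0
static void parent_check(void)
{
        const size_t size = 12;                 /* lsbit == 4 */
        const unsigned int lsbit = size & -size;
        size_t j;

        /* e.g. j == 5 (offset 60) -> offset 24 == index 2 == (5-1)/2 */
        for (j = 1; j < 1000; j++)
                BUG_ON(parent(j * size, lsbit, size) != ((j - 1) / 2) * size);
}
#endif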

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
            cmp_r_func_t cmp_func,
            swap_r_func_t swap_func,
            const void *priv)
{
        /* pre-scale counters for performance */
        size_t n = num * size, a = (num/2) * size;
        const unsigned int lsbit = size & -size;        /* Used to find parent */
        size_t shift = 0;

        if (!a)         /* num < 2 || size == 0 */
                return;

        /* called from 'sort' without swap function, let's pick the default */
        if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
                swap_func = NULL;

        if (!swap_func) {
                if (is_aligned(base, size, 8))
                        swap_func = SWAP_WORDS_64;
                else if (is_aligned(base, size, 4))
                        swap_func = SWAP_WORDS_32;
                else
                        swap_func = SWAP_BYTES;
        }

        /*
         * Loop invariants:
         * 1. elements [a,n) satisfy the heap property (compare greater than
         *    all of their children),
         * 2. elements [n,num*size) are sorted, and
         * 3. a <= b <= c <= d <= n (whenever they are valid).
         */
        for (;;) {
                size_t b, c, d;

                if (a)                  /* Building heap: sift down a */
                        a -= size << shift;
                else if (n > 3 * size) { /* Sorting: Extract two largest elements */
                        n -= size;
                        do_swap(base, base + n, size, swap_func, priv);
                        shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
                        a = size << shift;
                        n -= size;
                        do_swap(base + a, base + n, size, swap_func, priv);
                } else {                /* Sort complete */
                        break;
                }

                /*
                 * Sift element at "a" down into heap. This is the
                 * "bottom-up" variant, which significantly reduces
                 * calls to cmp_func(): we find the sift-down path all
                 * the way to the leaves (one compare per level), then
                 * backtrack to find where to insert the target element.
                 *
                 * Because elements tend to sift down close to the leaves,
                 * this uses fewer compares than doing two per level
                 * on the way down. (A bit more than half as many on
                 * average, 3/4 worst-case.)
                 */
                for (b = a; c = 2*b + size, (d = c + size) < n;)
                        b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
                if (d == n)     /* Special case last leaf with no sibling */
                        b = c;

                /* Now backtrack from "b" to the correct location for "a" */
                while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
                        b = parent(b, lsbit, size);
                c = b;                  /* Where "a" belongs */
                while (b != a) {        /* Shift it into place */
                        b = parent(b, lsbit, size);
                        do_swap(base + b, base + c, size, swap_func, priv);
                }
        }

        n -= size;
        do_swap(base, base + n, size, swap_func, priv);
        if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
                do_swap(base, base + size, size, swap_func, priv);
}
EXPORT_SYMBOL(sort_r);
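
/*
 * Example (a sketch, not part of the original file): sort_r() passes
 * @priv through to the comparison, so context needn't live in globals.
 * A hypothetical caller sorting an index array by a separate key table:
 */
#if 0
static int cmp_by_key(const void *a, const void *b, const void *priv)
{
        const u32 *keys = priv;
        u32 ka = keys[*(const size_t *)a];
        u32 kb = keys[*(const size_t *)b];

        return ka < kb ? -1 : ka > kb;
}

/* NULL swap_func selects the built-in word-wide swap:
 * sort_r(idx, nr, sizeof(*idx), cmp_by_key, NULL, keys); */
#endif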

void sort(void *base, size_t num, size_t size,
          cmp_func_t cmp_func,
          swap_func_t swap_func)
{
        struct wrapper w = {
                .cmp  = cmp_func,
                .swap = swap_func,
        };

        return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
EXPORT_SYMBOL(sort);