v4.6
 
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

static int alignment_ok(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}
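
/*
 * Illustration (not part of the kernel source): for align == 8 the mask
 * align - 1 is 0x7, so a base address of 0x1000 passes the check
 * (0x1000 & 0x7 == 0) while 0x1004 does not (0x1004 & 0x7 == 0x4).
 * On architectures that select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS,
 * the IS_ENABLED() test short-circuits to true for any address.
 */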

static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

EXPORT_SYMBOL(sort);
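
/*
 * Illustrative usage sketch, not part of lib/sort.c: the element type,
 * field names, and helper below are hypothetical.  It shows a typical
 * call with cmp_func supplied and swap_func left NULL, in which case
 * sort() picks u64_swap for these 8-byte elements when the array is
 * suitably aligned, or generic_swap otherwise.
 */
struct item {
	u32 key;
	u32 payload;
};

static int item_cmp(const void *a, const void *b)
{
	const struct item *x = a, *y = b;

	/* return <0, 0 or >0, as for any sort() comparison function */
	if (x->key < y->key)
		return -1;
	return x->key > y->key;
}

static void sort_items(struct item *arr, size_t n)
{
	sort(arr, n, sizeof(*arr), item_cmp, NULL);
}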

#if 0
#include <linux/slab.h>
/* a simple boot-time regression test */

int cmpint(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static int sort_test(void)
{
	int *a, i, r = 1;

	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
	BUG_ON(!a);

	printk("testing sort()\n");

	for (i = 0; i < 1000; i++) {
		r = (r * 725861) % 6599;
		a[i] = r;
	}

	sort(a, 1000, sizeof(int), cmpint, NULL);

	for (i = 0; i < 999; i++)
		if (a[i] > a[i+1]) {
			printk("sort() failed!\n");
			break;
		}

	kfree(a);

	return 0;
}

module_init(sort_test);
#endif
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

static int alignment_ok(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

EXPORT_SYMBOL(sort);
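
/*
 * As the kernel-doc above notes, callers may pass a swap_func tuned to
 * their element type instead of relying on generic_swap's byte-by-byte
 * loop.  A hedged sketch for a hypothetical 16-byte element follows;
 * the struct and function names are illustrative, not part of lib/sort.c.
 */
struct pair {
	u64 first;
	u64 second;
};

static void pair_swap(void *a, void *b, int size)
{
	/* size is always sizeof(struct pair) here; unused, as in u64_swap */
	struct pair t = *(struct pair *)a;

	*(struct pair *)a = *(struct pair *)b;
	*(struct pair *)b = t;
}

static int pair_cmp(const void *a, const void *b)
{
	const struct pair *x = a, *y = b;

	if (x->first < y->first)
		return -1;
	return x->first > y->first;
}

/* Example call: sort(pairs, n, sizeof(*pairs), pair_cmp, pair_swap); */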