lib/list_sort.c (v3.15)

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/list.h>

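/*
 * part[] in list_sort() below keeps at most one pending sorted sublist per
 * power-of-two size, so this bound covers lists of up to about 2^20
 * elements; longer lists are still sorted, just with less balanced merges
 * (see the printk_once() in list_sort()).
 */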
#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a?:b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

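	/*
	 * part[] behaves like a binary counter: part[lev] normally holds a
	 * pending sorted sublist of 2^lev elements (or NULL).  Each new
	 * element is merged up through the occupied slots, so every element
	 * takes part in O(log n) merges.
	 */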
	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list passed to"
					" list_sort() too long for"
					" efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);

#ifdef CONFIG_TEST_LIST_SORT

#include <linux/random.h>

/*
 * The pattern of set bits in the list length determines which cases
 * are hit in list_sort().
 */
#define TEST_LIST_LEN (512+128+2) /* not including head */

#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C

struct debug_el {
	unsigned int poison1;
	struct list_head list;
	unsigned int poison2;
	int value;
	unsigned serial;
};

/* Array, containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;

static int __init check(struct debug_el *ela, struct debug_el *elb)
{
	if (ela->serial >= TEST_LIST_LEN) {
		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
				ela->serial);
		return -EINVAL;
	}
	if (elb->serial >= TEST_LIST_LEN) {
		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
				elb->serial);
		return -EINVAL;
	}
	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
		printk(KERN_ERR "list_sort_test: error: phantom element\n");
		return -EINVAL;
	}
	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
				ela->poison1, ela->poison2);
		return -EINVAL;
	}
	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
				elb->poison1, elb->poison2);
		return -EINVAL;
	}
	return 0;
}

static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct debug_el *ela, *elb;

	ela = container_of(a, struct debug_el, list);
	elb = container_of(b, struct debug_el, list);

	check(ela, elb);
	return ela->value - elb->value;
}

static int __init list_sort_test(void)
{
	int i, count = 1, err = -EINVAL;
	struct debug_el *el;
	struct list_head *cur, *tmp;
	LIST_HEAD(head);

	printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");

	elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
	if (!elts) {
		printk(KERN_ERR "list_sort_test: error: cannot allocate "
				"memory\n");
		goto exit;
	}

	for (i = 0; i < TEST_LIST_LEN; i++) {
		el = kmalloc(sizeof(*el), GFP_KERNEL);
		if (!el) {
			printk(KERN_ERR "list_sort_test: error: cannot "
					"allocate memory\n");
			goto exit;
		}
		 /* force some equivalencies */
		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
		el->serial = i;
		el->poison1 = TEST_POISON1;
		el->poison2 = TEST_POISON2;
		elts[i] = el;
		list_add_tail(&el->list, &head);
	}

	list_sort(NULL, &head, cmp);

	for (cur = head.next; cur->next != &head; cur = cur->next) {
		struct debug_el *el1;
		int cmp_result;

		if (cur->next->prev != cur) {
			printk(KERN_ERR "list_sort_test: error: list is "
					"corrupted\n");
			goto exit;
		}

		cmp_result = cmp(NULL, cur, cur->next);
		if (cmp_result > 0) {
			printk(KERN_ERR "list_sort_test: error: list is not "
					"sorted\n");
			goto exit;
		}

		el = container_of(cur, struct debug_el, list);
		el1 = container_of(cur->next, struct debug_el, list);
		if (cmp_result == 0 && el->serial >= el1->serial) {
			printk(KERN_ERR "list_sort_test: error: order of "
					"equivalent elements not preserved\n");
			goto exit;
		}

		if (check(el, el1)) {
			printk(KERN_ERR "list_sort_test: error: element check "
					"failed\n");
			goto exit;
		}
		count++;
	}

	if (count != TEST_LIST_LEN) {
		printk(KERN_ERR "list_sort_test: error: bad list length %d",
				count);
		goto exit;
	}

	err = 0;
exit:
	kfree(elts);
	list_for_each_safe(cur, tmp, &head) {
		list_del(cur);
		kfree(container_of(cur, struct debug_el, list));
	}
	return err;
}
module_init(list_sort_test);
#endif /* CONFIG_TEST_LIST_SORT */
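
For reference, below is a minimal sketch of a caller of the v3.15 API described in the kernel-doc above. The type and function names used here (struct item, item_cmp, sort_items) are illustrative only and do not come from the kernel tree; the fixed parts are the list_sort() signature and the cmp() contract: a negative, zero, or positive return, with zero preserving the original order of equivalent elements.

struct item {
	struct list_head node;
	int key;
};

/* Traditional <0 / =0 / >0 comparison, as required by the v3.15 kernel-doc */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = container_of(a, struct item, node);
	struct item *ib = container_of(b, struct item, node);

	if (ia->key < ib->key)
		return -1;
	if (ia->key > ib->key)
		return 1;
	return 0;	/* equivalent: original relative order is preserved */
}

/* Sort a list of struct item nodes in ascending key order */
static void sort_items(struct list_head *items)
{
	list_sort(NULL, items, item_cmp);
}
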
lib/list_sort.c (v5.4)

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
		struct list_head const *, struct list_head const *);

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
__attribute__((nonnull(2,3,4)))
static struct list_head *merge(void *priv, cmp_func cmp,
				struct list_head *a, struct list_head *b)
{
	struct list_head *head, **tail = &head;

	for (;;) {
		/* if equal, take 'a' -- important for sort stability */
		if (cmp(priv, a, b) <= 0) {
			*tail = a;
			tail = &a->next;
			a = a->next;
			if (!a) {
				*tail = b;
				break;
			}
		} else {
			*tail = b;
			tail = &b->next;
			b = b->next;
			if (!b) {
				*tail = a;
				break;
			}
		}
	}
	return head;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
__attribute__((nonnull(2,3,4,5)))
static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
			struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	for (;;) {
		/* if equal, take 'a' -- important for sort stability */
		if (cmp(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			tail = a;
			a = a->next;
			if (!a)
				break;
		} else {
			tail->next = b;
			b->prev = tail;
			tail = b;
			b = b->next;
			if (!b) {
				b = a;
				break;
			}
		}
	}

	/* Finish linking remainder of list b on to tail */
	tail->next = b;
	do {
		/*
		 * If the merge is highly unbalanced (e.g. the input is
		 * already sorted), this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!++count))
			cmp(priv, b, b);
		b->prev = tail;
		tail = b;
		b = b->next;
	} while (b);

	/* And the final links to make a circular doubly-linked list */
	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * The comparison function @cmp must return > 0 if @a should sort after
 * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
 * sort before @b *or* their original order should be preserved.  It is
 * always called with the element that came first in the input in @a,
 * and list_sort is a stable sort, so it is not necessary to distinguish
 * the @a < @b and @a == @b cases.
 *
 * This is compatible with two styles of @cmp function:
 * - The traditional style which returns <0 / =0 / >0, or
 * - Returning a boolean 0/1.
 * The latter offers a chance to save a few cycles in the comparison
 * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
 *
 * A good way to write a multi-word comparison is::
 *
 *	if (a->high != b->high)
 *		return a->high > b->high;
 *	if (a->middle != b->middle)
 *		return a->middle > b->middle;
 *	return a->low > b->low;
 *
 *
 * This mergesort is as eager as possible while always performing at least
 * 2:1 balanced merges.  Given two pending sublists of size 2^k, they are
 * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
 *
 * Thus, it will avoid cache thrashing as long as 3*2^k elements can
 * fit into the cache.  Not quite as good as a fully-eager bottom-up
 * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
 * the common case that everything fits into L1.
 *
 *
 * The merging is controlled by "count", the number of elements in the
 * pending lists.  This is beautifully simple code, but rather subtle.
 *
 * Each time we increment "count", we set one bit (bit k) and clear
 * bits k-1 .. 0.  Each time this happens (except the very first time
 * for each bit, when count increments to 2^k), we merge two lists of
 * size 2^k into one list of size 2^(k+1).
 *
 * This merge happens exactly when the count reaches an odd multiple of
 * 2^k, which is when we have 2^k elements pending in smaller lists,
 * so it's safe to merge away two lists of size 2^k.
 *
 * After this happens twice, we have created two lists of size 2^(k+1),
 * which will be merged into a list of size 2^(k+2) before we create
 * a third list of size 2^(k+1), so there are never more than two pending.
 *
 * The number of pending lists of size 2^k is determined by the
 * state of bit k of "count" plus two extra pieces of information:
 *
 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
 * - Whether the higher-order bits are zero or non-zero (i.e.
 *   is count >= 2^(k+1)).
 *
 * There are six states we distinguish.  "x" represents some arbitrary
 * bits, and "y" represents some arbitrary non-zero bits:
 * 0:  00x: 0 pending of size 2^k;           x pending of sizes < 2^k
 * 1:  01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 2: x10x: 0 pending of size 2^k; 2^k     + x pending of sizes < 2^k
 * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 4: y00x: 1 pending of size 2^k; 2^k     + x pending of sizes < 2^k
 * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * (merge and loop back to state 2)
 *
 * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
 * bit k-1 is set while the more significant bits are non-zero) and
 * merge them away in the 5->2 transition.  Note in particular that just
 * before the 5->2 transition, all lower-order bits are 11 (state 3),
 * so there is one list of each smaller size.
 *
 * When we reach the end of the input, we merge all the pending
 * lists, from smallest to largest.  If you work through cases 2 to
 * 5 above, you can see that the number of elements we merge with a list
 * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
 * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
 */
__attribute__((nonnull(2,3)))
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *list = head->next, *pending = NULL;
	size_t count = 0;	/* Count of pending */

	if (list == head->prev)	/* Zero or one elements */
		return;

	/* Convert to a null-terminated singly-linked list. */
	head->prev->next = NULL;

	/*
	 * Data structure invariants:
	 * - All lists are singly linked and null-terminated; prev
	 *   pointers are not maintained.
	 * - pending is a prev-linked "list of lists" of sorted
	 *   sublists awaiting further merging.
	 * - Each of the sorted sublists is power-of-two in size.
	 * - Sublists are sorted by size and age, smallest & newest at front.
	 * - There are zero to two sublists of each size.
	 * - A pair of pending sublists are merged as soon as the number
	 *   of following pending elements equals their size (i.e.
	 *   each time count reaches an odd multiple of that size).
	 *   That ensures each later final merge will be at worst 2:1.
	 * - Each round consists of:
	 *   - Merging the two sublists selected by the highest bit
	 *     which flips when count is incremented, and
	 *   - Adding an element from the input as a size-1 sublist.
	 */
	do {
		size_t bits;
		struct list_head **tail = &pending;

		/* Find the least-significant clear bit in count */
		for (bits = count; bits & 1; bits >>= 1)
			tail = &(*tail)->prev;
		/* Do the indicated merge */
		if (likely(bits)) {
			struct list_head *a = *tail, *b = a->prev;

			a = merge(priv, (cmp_func)cmp, b, a);
			/* Install the merged result in place of the inputs */
			a->prev = b->prev;
			*tail = a;
		}

		/* Move one element from input list to pending */
		list->prev = pending;
		pending = list;
		list = list->next;
		pending->next = NULL;
		count++;
	} while (list);

	/* End of input; merge together all the pending lists. */
	list = pending;
	pending = pending->prev;
	for (;;) {
		struct list_head *next = pending->prev;

		if (!next)
			break;
		list = merge(priv, (cmp_func)cmp, pending, list);
		pending = next;
	}
	/* The final merge, rebuilding prev links */
	merge_final(priv, (cmp_func)cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
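
As a companion to the kernel-doc above, here is a minimal sketch of a caller using the cheaper boolean-style comparison and the multi-word pattern it recommends. The type and function names used here (struct range, range_cmp, sort_ranges) are illustrative only and not from the kernel tree; only the list_sort() signature and the cmp() contract are taken from the source.

struct range {
	struct list_head node;
	u64 high;	/* most-significant word of the sort key */
	u64 low;	/* least-significant word of the sort key */
};

/* Boolean-style cmp: return 1 if a should sort after b, 0 otherwise */
static int range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct range *ra = container_of(a, struct range, node);
	struct range *rb = container_of(b, struct range, node);

	if (ra->high != rb->high)
		return ra->high > rb->high;
	return ra->low > rb->low;
}

/* Sort a list of struct range nodes in ascending (high, low) order */
static void sort_ranges(struct list_head *ranges)
{
	list_sort(NULL, ranges, range_cmp);
}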