// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
__attribute__((nonnull(2,3,4)))
static struct list_head *merge(void *priv, list_cmp_func_t cmp,
				struct list_head *a, struct list_head *b)
{
	struct list_head *head, **tail = &head;

	for (;;) {
		/* if equal, take 'a' -- important for sort stability */
		if (cmp(priv, a, b) <= 0) {
			*tail = a;
			tail = &a->next;
			a = a->next;
			if (!a) {
				*tail = b;
				break;
			}
		} else {
			*tail = b;
			tail = &b->next;
			b = b->next;
			if (!b) {
				*tail = a;
				break;
			}
		}
	}
	return head;
}
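
/*
 * Note: merge() above builds its result through the pointer-to-pointer
 * "tail", which always addresses the link that the next-smallest node
 * must be written to (initially &head, afterwards &node->next), so the
 * first node needs no special case.
 */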

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
__attribute__((nonnull(2,3,4,5)))
static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
			struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	for (;;) {
		/* if equal, take 'a' -- important for sort stability */
		if (cmp(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			tail = a;
			a = a->next;
			if (!a)
				break;
		} else {
			tail->next = b;
			b->prev = tail;
			tail = b;
			b = b->next;
			if (!b) {
				b = a;
				break;
			}
		}
	}

	/* Finish linking remainder of list b on to tail */
	tail->next = b;
	do {
		/*
		 * If the merge is highly unbalanced (e.g. the input is
		 * already sorted), this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!++count))
			cmp(priv, b, b);
		b->prev = tail;
		tail = b;
		b = b->next;
	} while (b);

	/* And the final links to make a circular doubly-linked list */
	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * The comparison function @cmp must return > 0 if @a should sort after
 * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
 * sort before @b *or* their original order should be preserved.  It is
 * always called with the element that came first in the input in @a,
 * and list_sort is a stable sort, so it is not necessary to distinguish
 * the @a < @b and @a == @b cases.
 *
 * This is compatible with two styles of @cmp function:
 * - The traditional style which returns <0 / =0 / >0, or
 * - Returning a boolean 0/1.
 * The latter offers a chance to save a few cycles in the comparison
 * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
 *
 * A good way to write a multi-word comparison is::
 *
 *	if (a->high != b->high)
 *		return a->high > b->high;
 *	if (a->middle != b->middle)
 *		return a->middle > b->middle;
 *	return a->low > b->low;
 *
 *
 * This mergesort is as eager as possible while always performing at least
 * 2:1 balanced merges.  Given two pending sublists of size 2^k, they are
 * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
 *
 * Thus, it will avoid cache thrashing as long as 3*2^k elements can
 * fit into the cache.  Not quite as good as a fully-eager bottom-up
 * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
 * the common case that everything fits into L1.
 *
 *
 * The merging is controlled by "count", the number of elements in the
 * pending lists.  This is beautifully simple code, but rather subtle.
 *
 * Each time we increment "count", we set one bit (bit k) and clear
 * bits k-1 .. 0.  Each time this happens (except the very first time
 * for each bit, when count increments to 2^k), we merge two lists of
 * size 2^k into one list of size 2^(k+1).
 *
 * This merge happens exactly when the count reaches an odd multiple of
 * 2^k, which is when we have 2^k elements pending in smaller lists,
 * so it's safe to merge away two lists of size 2^k.
 *
 * After this happens twice, we have created two lists of size 2^(k+1),
 * which will be merged into a list of size 2^(k+2) before we create
 * a third list of size 2^(k+1), so there are never more than two pending.
 *
 * The number of pending lists of size 2^k is determined by the
 * state of bit k of "count" plus two extra pieces of information:
 *
 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
 * - Whether the higher-order bits are zero or non-zero (i.e.
 *   is count >= 2^(k+1)).
 *
 * There are six states we distinguish.  "x" represents some arbitrary
 * bits, and "y" represents some arbitrary non-zero bits:
 * 0:  00x: 0 pending of size 2^k;           x pending of sizes < 2^k
 * 1:  01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 2: x10x: 0 pending of size 2^k;     2^k + x pending of sizes < 2^k
 * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 4: y00x: 1 pending of size 2^k;     2^k + x pending of sizes < 2^k
 * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * (merge and loop back to state 2)
 *
 * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
 * bit k-1 is set while the more significant bits are non-zero) and
 * merge them away in the 5->2 transition.  Note in particular that just
 * before the 5->2 transition, all lower-order bits are 11 (state 3),
 * so there is one list of each smaller size.
 *
 * When we reach the end of the input, we merge all the pending
 * lists, from smallest to largest.  If you work through cases 2 to
 * 5 above, you can see that the number of elements we merge with a list
 * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
 * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
 */
__attribute__((nonnull(2,3)))
void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
	struct list_head *list = head->next, *pending = NULL;
	size_t count = 0;	/* Count of pending */

	if (list == head->prev)	/* Zero or one elements */
		return;

	/* Convert to a null-terminated singly-linked list. */
	head->prev->next = NULL;

	/*
	 * Data structure invariants:
	 * - All lists are singly linked and null-terminated; prev
	 *   pointers are not maintained.
	 * - pending is a prev-linked "list of lists" of sorted
	 *   sublists awaiting further merging.
	 * - Each of the sorted sublists is power-of-two in size.
	 * - Sublists are sorted by size and age, smallest & newest at front.
	 * - There are zero to two sublists of each size.
	 * - A pair of pending sublists are merged as soon as the number
	 *   of following pending elements equals their size (i.e.
	 *   each time count reaches an odd multiple of that size).
	 *   That ensures each later final merge will be at worst 2:1.
	 * - Each round consists of:
	 *   - Merging the two sublists selected by the highest bit
	 *     which flips when count is incremented, and
	 *   - Adding an element from the input as a size-1 sublist.
	 */
	do {
		size_t bits;
		struct list_head **tail = &pending;

		/* Find the least-significant clear bit in count */
		for (bits = count; bits & 1; bits >>= 1)
			tail = &(*tail)->prev;
		/* Do the indicated merge */
		if (likely(bits)) {
			struct list_head *a = *tail, *b = a->prev;

			a = merge(priv, cmp, b, a);
			/* Install the merged result in place of the inputs */
			a->prev = b->prev;
			*tail = a;
		}

		/* Move one element from input list to pending */
		list->prev = pending;
		pending = list;
		list = list->next;
		pending->next = NULL;
		count++;
	} while (list);

	/* End of input; merge together all the pending lists. */
	list = pending;
	pending = pending->prev;
	for (;;) {
		struct list_head *next = pending->prev;

		if (!next)
			break;
		list = merge(priv, cmp, pending, list);
		pending = next;
	}
	/* The final merge, rebuilding prev links */
	merge_final(priv, cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
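
/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * might use the interface documented above.  "struct example_item",
 * its fields and example_cmp()/example_sort() are hypothetical names,
 * and this assumes the list_cmp_func_t signature that takes const
 * struct list_head pointers.  The comparison uses the boolean 0/1
 * return style mentioned in the kernel-doc; a <0 / =0 / >0 return
 * would work just as well.
 */
struct example_item {
	struct list_head node;
	int key;
};

static int example_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	const struct example_item *ia = list_entry(a, struct example_item, node);
	const struct example_item *ib = list_entry(b, struct example_item, node);

	/* Nonzero means "a sorts after b"; 0 keeps the original (stable) order */
	return ia->key > ib->key;
}

static void example_sort(struct list_head *items)
{
	/* No private state is needed here, so pass NULL as @priv */
	list_sort(NULL, items, example_cmp);
}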

#define pr_fmt(fmt) "list_sort_test: " fmt

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(n log(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b.  If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
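
/*
 * Illustrative sketch, not part of the upstream file: a comparison
 * callback in the traditional <0 / =0 / >0 style required by the
 * kernel-doc above.  "struct example_entry", its fields and
 * example_entry_cmp() are hypothetical names.  Returning 0 for equal
 * keys lets the (stable) sort keep their original relative order.
 */
struct example_entry {
	struct list_head node;
	long key;
};

static int example_entry_cmp(void *priv, struct list_head *a,
			     struct list_head *b)
{
	const struct example_entry *ea = list_entry(a, struct example_entry, node);
	const struct example_entry *eb = list_entry(b, struct example_entry, node);

	if (ea->key < eb->key)
		return -1;
	if (ea->key > eb->key)
		return 1;
	return 0;
}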

#ifdef CONFIG_TEST_LIST_SORT

#include <linux/slab.h>
#include <linux/random.h>

/*
 * The pattern of set bits in the list length determines which cases
 * are hit in list_sort().
 */
#define TEST_LIST_LEN (512+128+2) /* not including head */

#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C

struct debug_el {
	unsigned int poison1;
	struct list_head list;
	unsigned int poison2;
	int value;
	unsigned serial;
};

/* Array, containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;

static int __init check(struct debug_el *ela, struct debug_el *elb)
{
	if (ela->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", ela->serial);
		return -EINVAL;
	}
	if (elb->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", elb->serial);
		return -EINVAL;
	}
	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
		pr_err("error: phantom element\n");
		return -EINVAL;
	}
	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			ela->poison1, ela->poison2);
		return -EINVAL;
	}
	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			elb->poison1, elb->poison2);
		return -EINVAL;
	}
	return 0;
}

static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct debug_el *ela, *elb;

	ela = container_of(a, struct debug_el, list);
	elb = container_of(b, struct debug_el, list);

	check(ela, elb);
	return ela->value - elb->value;
}

static int __init list_sort_test(void)
{
	int i, count = 1, err = -ENOMEM;
	struct debug_el *el;
	struct list_head *cur;
	LIST_HEAD(head);

	pr_debug("start testing list_sort()\n");

	elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
	if (!elts) {
		pr_err("error: cannot allocate memory\n");
		return err;
	}

	for (i = 0; i < TEST_LIST_LEN; i++) {
		el = kmalloc(sizeof(*el), GFP_KERNEL);
		if (!el) {
			pr_err("error: cannot allocate memory\n");
			goto exit;
		}
		/* force some equivalencies */
		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
		el->serial = i;
		el->poison1 = TEST_POISON1;
		el->poison2 = TEST_POISON2;
		elts[i] = el;
		list_add_tail(&el->list, &head);
	}

	list_sort(NULL, &head, cmp);

	err = -EINVAL;
	for (cur = head.next; cur->next != &head; cur = cur->next) {
		struct debug_el *el1;
		int cmp_result;

		if (cur->next->prev != cur) {
			pr_err("error: list is corrupted\n");
			goto exit;
		}

		cmp_result = cmp(NULL, cur, cur->next);
		if (cmp_result > 0) {
			pr_err("error: list is not sorted\n");
			goto exit;
		}

		el = container_of(cur, struct debug_el, list);
		el1 = container_of(cur->next, struct debug_el, list);
		if (cmp_result == 0 && el->serial >= el1->serial) {
			pr_err("error: order of equivalent elements not preserved\n");
			goto exit;
		}

		if (check(el, el1)) {
			pr_err("error: element check failed\n");
			goto exit;
		}
		count++;
	}
	if (head.prev != cur) {
		pr_err("error: list is corrupted\n");
		goto exit;
	}

	if (count != TEST_LIST_LEN) {
		pr_err("error: bad list length %d\n", count);
		goto exit;
	}

	err = 0;
exit:
	for (i = 0; i < TEST_LIST_LEN; i++)
		kfree(elts[i]);
	kfree(elts);
	return err;
}
late_initcall(list_sort_test);
#endif /* CONFIG_TEST_LIST_SORT */