// SPDX-License-Identifier: GPL-2.0-only
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

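/*
 * Insert an entry of the given order (covering 2^order indices) at
 * @index.  This is the usual XArray store idiom: retry the store with
 * xas_nomem() so that node allocation can happen outside the lock.
 */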
static int item_insert_order(struct xarray *xa, unsigned long index,
			unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	struct item *item = item_create(index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (!xas_error(&xas))
		return 0;

	free(item);
	return xas_error(&xas);
}

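/*
 * Iteration must see each multi-order entry exactly once, no matter
 * which of its covered indices the walk starts from.  For each start
 * index j, first work out which entry the iterator should find, then
 * check index, node shift and order for every entry returned.
 */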
void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}

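/*
 * Same idea for marked iteration: a mark set on any index covered by a
 * multi-order entry must be visible across the entry's whole range,
 * and tag_tagged_items() must copy each mark exactly once.
 */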
void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

	assert(!xa_marked(xa, XA_MARK_1));

	for (i = 0; i < TAG_ENTRIES; i++)
		xa_set_mark(xa, tag_index[i], XA_MARK_1);

	for (j = 0; j < 256; j++) {
		int k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
				XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
				XA_MARK_0) == TAG_ENTRIES);
	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}

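/*
 * Racing iteration test: one creator thread repeatedly inserts and
 * RCU-deletes a multi-order entry at index 0 while the remaining
 * threads iterate over the tree under rcu_read_lock(), sanity-checking
 * whatever entries they find.
 */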
bool stop_iteration = false;

static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}

static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		/*
		 * Restart from index 0 on each pass; a completed
		 * xas_for_each() leaves the state at the end of the
		 * range, so without the reset later passes find nothing.
		 */
		xas_set(&xas, 0);
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			if (xas_retry(&xas, item))
				continue;

			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}
	return NULL;
}

static void multiorder_iteration_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	int i;

	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}

static DEFINE_XARRAY(array);

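/*
 * Entry point called by the test harness: run each check against the
 * shared static XArray (every test kills the tree when it finishes),
 * then drop the per-cpu preloaded nodes.
 */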
void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);

	radix_tree_cpu_dead(0);
}

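/*
 * main() is marked __weak so this file can also be linked into the
 * combined test binary, whose own main() runs every suite.
 */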
int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	multiorder_checks();
	rcu_unregister_thread();
	return 0;
}
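
/*
 * What follows is the older, pre-XArray revision of this test, written
 * against the radix tree API directly.  It additionally exercises
 * tags, join, split, shrink and exceptional-entry accounting, which
 * the XArray version above does not cover.
 */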
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

#define for_each_index(i, base, order) \
	for (i = base; i < base + (1 << order); i++)

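/*
 * Set and clear tags on a single index of a multi-order entry and
 * verify that every index the entry covers observes the same tag
 * state.
 */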
static void __multiorder_tag_test(int index, int order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	int base, err, i;

	/* our canonical entry */
	base = index & ~((1 << order) - 1);

	printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
			index, base);

	err = item_insert_order(&tree, index, order);
	assert(!err);

	/*
	 * Verify we get collisions for covered indices.  We try and fail to
	 * insert an exceptional entry so we don't leak memory via
	 * item_insert_order().
	 */
	for_each_index(i, base, order) {
		err = __radix_tree_insert(&tree, i, order,
				(void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY));
		assert(err == -EEXIST);
	}

	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	assert(radix_tree_tag_set(&tree, index, 0));

	for_each_index(i, base, order) {
		assert(radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
	assert(radix_tree_tag_clear(&tree, index, 0));

	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(radix_tree_tag_get(&tree, i, 1));
	}

	assert(radix_tree_tag_clear(&tree, index, 1));

	assert(!radix_tree_tagged(&tree, 0));
	assert(!radix_tree_tagged(&tree, 1));

	item_kill_tree(&tree);
}

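/*
 * Tag both a multi-order entry at index 0 and a small entry above it,
 * then check that tag_tagged_items() finds and retags both.
 */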
static void __multiorder_tag_test2(unsigned order, unsigned long index2)
{
	RADIX_TREE(tree, GFP_KERNEL);
	unsigned long index = (1 << order);
	index2 += index;

	assert(item_insert_order(&tree, 0, order) == 0);
	assert(item_insert(&tree, index2) == 0);

	assert(radix_tree_tag_set(&tree, 0, 0));
	assert(radix_tree_tag_set(&tree, index2, 0));

	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);

	item_kill_tree(&tree);
}

static void multiorder_tag_tests(void)
{
	int i, j;

	/* test multi-order entry for indices 0-7 with no sibling pointers */
	__multiorder_tag_test(0, 3);
	__multiorder_tag_test(5, 3);

	/* test multi-order entry for indices 8-15 with no sibling pointers */
	__multiorder_tag_test(8, 3);
	__multiorder_tag_test(15, 3);

	/*
	 * Our order 5 entry covers indices 0-31 in a tree with height=2.
	 * This is broken up as follows:
	 * 0-7: canonical entry
	 * 8-15: sibling 1
	 * 16-23: sibling 2
	 * 24-31: sibling 3
	 */
	__multiorder_tag_test(0, 5);
	__multiorder_tag_test(29, 5);

	/* same test, but with indices 32-63 */
	__multiorder_tag_test(32, 5);
	__multiorder_tag_test(44, 5);

	/*
	 * Our order 8 entry covers indices 0-255 in a tree with height=3.
	 * This is broken up as follows:
	 * 0-63: canonical entry
	 * 64-127: sibling 1
	 * 128-191: sibling 2
	 * 192-255: sibling 3
	 */
	__multiorder_tag_test(0, 8);
	__multiorder_tag_test(190, 8);

	/* same test, but with indices 256-511 */
	__multiorder_tag_test(256, 8);
	__multiorder_tag_test(300, 8);

	__multiorder_tag_test(0x12345678UL, 8);

	for (i = 1; i < 10; i++)
		for (j = 0; j < (10 << i); j++)
			__multiorder_tag_test2(i, j);
}

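/*
 * Insert a single multi-order entry and verify lookup, insert
 * collisions, slot replacement and deletion across every index the
 * entry covers (and absence everywhere else).
 */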
static void multiorder_check(unsigned long index, int order)
{
	unsigned long i;
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);
	void **slot;
	struct item *item2 = item_create(min, order);
	RADIX_TREE(tree, GFP_KERNEL);

	printv(2, "Multiorder index %ld, order %d\n", index, order);

	assert(item_insert_order(&tree, index, order) == 0);

	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == index);
	}
	for (i = 0; i < min; i++)
		item_check_absent(&tree, i);
	for (i = max; i < 2*max; i++)
		item_check_absent(&tree, i);
	for (i = min; i < max; i++)
		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);

	slot = radix_tree_lookup_slot(&tree, index);
	free(*slot);
	radix_tree_replace_slot(&tree, slot, item2);
	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == min);
	}

	assert(item_delete(&tree, min) != 0);

	for (i = 0; i < 2*max; i++)
		item_check_absent(&tree, i);
}

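/*
 * Grow the tree past the node holding a multi-order entry, then delete
 * the extra entry and check that the tree shrinks back to the original
 * root with the multi-order entry intact.
 */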
static void multiorder_shrink(unsigned long index, int order)
{
	unsigned long i;
	unsigned long max = 1 << order;
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;

	printv(2, "Multiorder shrink index %ld, order %d\n", index, order);

	assert(item_insert_order(&tree, 0, order) == 0);

	node = tree.rnode;

	assert(item_insert(&tree, index) == 0);
	assert(node != tree.rnode);

	assert(item_delete(&tree, index) != 0);
	assert(node == tree.rnode);

	for (i = 0; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == 0);
	}
	for (i = max; i < 2*max; i++)
		item_check_absent(&tree, i);

	if (!item_delete(&tree, 0)) {
		printv(2, "failed to delete index %ld (order %d)\n", index, order);
		abort();
	}

	for (i = 0; i < 2*max; i++)
		item_check_absent(&tree, i);
}

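/*
 * Regression test: set a tag on a small entry at index 0, then insert
 * a multi-order entry high enough to force the tree to grow.
 */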
static void multiorder_insert_bug(void)
{
	RADIX_TREE(tree, GFP_KERNEL);

	item_insert(&tree, 0);
	radix_tree_tag_set(&tree, 0, 0);
	item_insert_order(&tree, 3 << 6, 6);

	item_kill_tree(&tree);
}

void multiorder_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j, err;

	printv(1, "Multiorder iteration test\n");

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(&tree, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		radix_tree_for_each_slot(slot, &tree, &iter, j) {
			int height = order[i] / RADIX_TREE_MAP_SHIFT;
			int shift = height * RADIX_TREE_MAP_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;
			struct item *item = *slot;

			assert((iter.index | mask) == (index[i] | mask));
			assert(iter.shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(&tree);
}

void multiorder_tagged_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j;

	printv(1, "Multiorder tagged iteration test\n");

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(&tree, index[i], order[i]));

	assert(!radix_tree_tagged(&tree, 1));

	for (i = 0; i < TAG_ENTRIES; i++)
		assert(radix_tree_tag_set(&tree, tag_index[i], 1));

	for (j = 0; j < 256; j++) {
		int k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
			unsigned long mask;
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
				TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
			== TAG_ENTRIES);
	i = 0;
	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
		assert(iter.index == tag_index[i]);
		i++;
	}

	item_kill_tree(&tree);
}

/*
 * Basic join checks: make sure we can't find an entry in the tree after
 * a larger entry has replaced it
 */
static void multiorder_join1(unsigned long index,
				unsigned order1, unsigned order2)
{
	unsigned long loc;
	void *item, *item2 = item_create(index + 1, order1);
	RADIX_TREE(tree, GFP_KERNEL);

	item_insert_order(&tree, index, order2);
	item = radix_tree_lookup(&tree, index);
	radix_tree_join(&tree, index + 1, order1, item2);
	loc = find_item(&tree, item);
	if (loc == -1)
		free(item);
	item = radix_tree_lookup(&tree, index + 1);
	assert(item == item2);
	item_kill_tree(&tree);
}

/*
 * Check that the accounting of exceptional entries is handled correctly
 * by joining an exceptional entry to a normal pointer.
 */
static void multiorder_join2(unsigned order1, unsigned order2)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void *item1 = item_create(0, order1);
	void *item2;

	item_insert_order(&tree, 0, order2);
	radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
	assert(item2 == (void *)0x12UL);
	assert(node->exceptional == 1);

	item2 = radix_tree_lookup(&tree, 0);
	free(item2);

	radix_tree_join(&tree, 0, order1, item1);
	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
	assert(item2 == item1);
	assert(node->exceptional == 0);
	item_kill_tree(&tree);
}

/*
 * This test revealed an accounting bug for exceptional entries at one point.
 * Nodes were being freed back into the pool with an elevated exception count
 * by radix_tree_join() and then radix_tree_split() was failing to zero the
 * count of exceptional entries.
 */
static void multiorder_join3(unsigned int order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void **slot;
	struct radix_tree_iter iter;
	unsigned long i;

	for (i = 0; i < (1 << order); i++) {
		radix_tree_insert(&tree, i, (void *)0x12UL);
	}

	radix_tree_join(&tree, 0, order, (void *)0x16UL);
	rcu_barrier();

	radix_tree_split(&tree, 0, 0);

	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
	}

	__radix_tree_lookup(&tree, 0, &node, NULL);
	assert(node->exceptional == node->count);

	item_kill_tree(&tree);
}

static void multiorder_join(void)
{
	int i, j, idx;

	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
		for (i = 1; i < 15; i++) {
			for (j = 0; j < i; j++) {
				multiorder_join1(idx, i, j);
			}
		}
	}

	for (i = 1; i < 15; i++) {
		for (j = 0; j < i; j++) {
			multiorder_join2(i, j);
		}
	}

	for (i = 3; i < 10; i++) {
		multiorder_join3(i);
	}
}

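/*
 * After a split, the preload pool should be drained and no more nodes
 * should be allocated than were preloaded for the split.
 */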
static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
{
	struct radix_tree_preload *rtp = &radix_tree_preloads;
	if (rtp->nr != 0)
		printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
				rtp->nr);
	/*
	 * Can't check for equality here as some nodes may have been
	 * RCU-freed while we ran.  But we should never finish with more
	 * nodes allocated since they should have all been preloaded.
	 */
	if (nr_allocated > alloc)
		printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
				alloc, nr_allocated);
}

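/*
 * Split one multi-order entry into many smaller ones and confirm the
 * preload pools supplied all the nodes the split needed.
 */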
static void __multiorder_split(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_ATOMIC);
	void **slot;
	struct radix_tree_iter iter;
	unsigned alloc;
	struct item *item;

	radix_tree_preload(GFP_KERNEL);
	assert(item_insert_order(&tree, 0, old_order) == 0);
	radix_tree_preload_end();

	/* Wipe out the preloaded cache or it'll confuse check_mem() */
	radix_tree_cpu_dead(0);

	item = radix_tree_tag_set(&tree, 0, 2);

	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
	alloc = nr_allocated;
	radix_tree_split(&tree, 0, new_order);
	check_mem(old_order, new_order, alloc);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}
	radix_tree_preload_end();

	item_kill_tree(&tree);
	free(item);
}

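/*
 * Splitting an exceptional entry into regular pointers must clear the
 * node's exceptional count.
 */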
static void __multiorder_split2(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item != (void *)0x12);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}

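/*
 * Splitting an exceptional entry into smaller exceptional entries must
 * keep the exceptional accounting consistent, including when all but
 * one of the new slots are then set to NULL.
 */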
static void __multiorder_split3(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
	}

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->exceptional > 0);

	item_kill_tree(&tree);

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		if (iter.index == (1 << new_order))
			radix_tree_iter_replace(&tree, &iter, slot,
						(void *)0x16);
		else
			radix_tree_iter_replace(&tree, &iter, slot, NULL);
	}

	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->count == node->exceptional);
	do {
		node = node->parent;
		if (!node)
			break;
		assert(node->count == 1);
		assert(node->exceptional == 0);
	} while (1);

	item_kill_tree(&tree);
}

static void multiorder_split(void)
{
	int i, j;

	for (i = 3; i < 11; i++)
		for (j = 0; j < i; j++) {
			__multiorder_split(i, j);
			__multiorder_split2(i, j);
			__multiorder_split3(i, j);
		}
}

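/*
 * node->exceptional must track exceptional entries through insertion,
 * deletion and replacement alongside the regular entry count.
 */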
static void multiorder_account(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void **slot;

	item_insert_order(&tree, 0, 5);

	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
	__radix_tree_lookup(&tree, 0, &node, NULL);
	assert(node->count == node->exceptional * 2);
	radix_tree_delete(&tree, 1 << 5);
	assert(node->exceptional == 0);

	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
	__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
	assert(node->count == node->exceptional * 2);
	__radix_tree_replace(&tree, node, slot, NULL, NULL);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}

bool stop_iteration = false;

static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}

static void *iterator_func(void *ptr)
{
	struct radix_tree_root *tree = ptr;
	struct radix_tree_iter iter;
	struct item *item;
	void **slot;

	while (!stop_iteration) {
		rcu_read_lock();
		radix_tree_for_each_slot(slot, tree, &iter, 0) {
			item = radix_tree_deref_slot(slot);

			if (!item)
				continue;
			if (radix_tree_deref_retry(item)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			item_sanity(item, iter.index);
		}
		rcu_read_unlock();
	}
	return NULL;
}

static void multiorder_iteration_race(void)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	RADIX_TREE(tree, GFP_KERNEL);
	int i;

	pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(&tree);
}

void multiorder_checks(void)
{
	int i;

	for (i = 0; i < 20; i++) {
		multiorder_check(200, i);
		multiorder_check(0, i);
		multiorder_check((1UL << i) + 1, i);
	}

	for (i = 0; i < 15; i++)
		multiorder_shrink((1UL << (i + RADIX_TREE_MAP_SHIFT)), i);

	multiorder_insert_bug();
	multiorder_tag_tests();
	multiorder_iteration();
	multiorder_tagged_iteration();
	multiorder_join();
	multiorder_split();
	multiorder_account();
	multiorder_iteration_race();

	radix_tree_cpu_dead(0);
}

int __weak main(void)
{
	radix_tree_init();
	multiorder_checks();
	return 0;
}