// SPDX-License-Identifier: GPL-2.0-only
/*
 * iteration_check.c: test races having to do with xarray iteration
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */
#include <pthread.h>
#include "test.h"

#define NUM_THREADS 5
#define MAX_IDX 100
#define TAG XA_MARK_0
#define NEW_TAG XA_MARK_1

static pthread_t threads[NUM_THREADS];
static unsigned int seeds[3];
static DEFINE_XARRAY(array);
static bool test_complete;
static int max_order;

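/*
 * Insert an item at @index: try the highest order first and fall back to
 * smaller orders when a conflicting entry already covers the range.  On
 * allocation failure, drop the lock, let xas_nomem() allocate and retry;
 * if every order conflicted, free the unused item.
 */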
void my_item_insert(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	struct item *item = item_create(index, 0);
	int order;

retry:
	xas_lock(&xas);
	for (order = max_order; order >= 0; order--) {
		xas_set_order(&xas, index, order);
		item->order = order;
		if (xas_find_conflict(&xas))
			continue;
		xas_store(&xas, item);
		xas_set_mark(&xas, TAG);
		break;
	}
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL))
		goto retry;
	if (order < 0)
		free(item);
}

/* relentlessly fill the array with tagged entries */
static void *add_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		unsigned long pgoff;

		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
			my_item_insert(&array, pgoff);
		}
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over tagged entries, retrying when we find ourselves in a deleted
 * node and randomly pausing the iteration.
 */
static void *tagged_iteration_fn(void *arg)
{
	XA_STATE(xas, &array, 0);
	void *entry;

	rcu_register_thread();

	while (!test_complete) {
		xas_set(&xas, 0);
		rcu_read_lock();
		xas_for_each_marked(&xas, entry, ULONG_MAX, TAG) {
			if (xas_retry(&xas, entry))
				continue;

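			/*
			 * Occasionally pause the walk and drop the RCU read
			 * lock; rcu_barrier() lets any deferred node frees
			 * complete before the iteration resumes.
			 */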
			if (rand_r(&seeds[0]) % 50 == 0) {
				xas_pause(&xas);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the entries, retrying when we find ourselves in a deleted
 * node and randomly pausing the iteration.
 */
static void *untagged_iteration_fn(void *arg)
{
	XA_STATE(xas, &array, 0);
	void *entry;

	rcu_register_thread();

	while (!test_complete) {
		xas_set(&xas, 0);
		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			if (xas_retry(&xas, entry))
				continue;

			if (rand_r(&seeds[1]) % 50 == 0) {
				xas_pause(&xas);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Randomly remove entries to help induce retries in the
 * two iteration functions.
 */
static void *remove_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		int pgoff;
		struct item *item;

		pgoff = rand_r(&seeds[2]) % MAX_IDX;

		item = xa_erase(&array, pgoff);
		if (item)
			item_free(item, pgoff);
	}

	rcu_unregister_thread();

	return NULL;
}

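/*
 * Repeatedly set NEW_TAG on every entry that is already marked with TAG,
 * so that mark updates race with the two iterators.
 */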
static void *tag_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		tag_tagged_items(&array, 0, MAX_IDX, 10, TAG, NEW_TAG);
	}
	rcu_unregister_thread();
	return NULL;
}

/* This is a unit test for a bug found by the syzkaller tester */
void iteration_test(unsigned order, unsigned test_duration)
{
	int i;

	printv(1, "Running %siteration tests for %d seconds\n",
			order > 0 ? "multiorder " : "", test_duration);

	max_order = order;
	test_complete = false;

	for (i = 0; i < 3; i++)
		seeds[i] = rand();

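	/*
	 * Spawn the workers: two iterating threads, plus one each for
	 * inserting, removing and re-tagging entries.
	 */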
	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
		perror("create tagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
		perror("create untagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
		perror("create add entry thread");
		exit(1);
	}
	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
		perror("create remove entry thread");
		exit(1);
	}
	if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) {
		perror("create tag entry thread");
		exit(1);
	}

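	/* Let the threads race for the requested time, then ask them to stop. */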
	sleep(test_duration);
	test_complete = true;

	for (i = 0; i < NUM_THREADS; i++) {
		if (pthread_join(threads[i], NULL)) {
			perror("pthread_join");
			exit(1);
		}
	}

	item_kill_tree(&array);
}
/*
 * iteration_check.c: test races having to do with radix tree iteration
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <pthread.h>
#include "test.h"

#define NUM_THREADS 5
#define MAX_IDX 100
#define TAG 0
#define NEW_TAG 1

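/*
 * Unlike the XArray version above, which serializes updaters with xas_lock(),
 * the radix tree does no locking of its own, so the worker threads serialize
 * their modifications with this mutex.
 */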
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t threads[NUM_THREADS];
static unsigned int seeds[3];
static RADIX_TREE(tree, GFP_KERNEL);
static bool test_complete;
static int max_order;

/* relentlessly fill the tree with tagged entries */
static void *add_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		unsigned long pgoff;
		int order;

		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
			pthread_mutex_lock(&tree_lock);
			for (order = max_order; order >= 0; order--) {
				if (item_insert_order(&tree, pgoff, order)
						== 0) {
					item_tag_set(&tree, pgoff, TAG);
					break;
				}
			}
			pthread_mutex_unlock(&tree_lock);
		}
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
 * things that have been removed and randomly resetting our iteration to the
 * next chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *tagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
			void *entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[0]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
 * that have been removed and randomly resetting our iteration to the next
 * chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *untagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
			void *entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[1]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
 * two iteration functions.
 */
static void *remove_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		int pgoff;

		pgoff = rand_r(&seeds[2]) % MAX_IDX;

		pthread_mutex_lock(&tree_lock);
		item_delete(&tree, pgoff);
		pthread_mutex_unlock(&tree_lock);
	}

	rcu_unregister_thread();

	return NULL;
}

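/*
 * Repeatedly copy TAG to NEW_TAG across the first MAX_IDX entries so that tag
 * updates race with the two iterators; tag_tagged_items() is handed tree_lock
 * so those updates stay serialized with the other updaters.
 */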
static void *tag_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG,
					NEW_TAG);
	}
	rcu_unregister_thread();
	return NULL;
}

/* This is a unit test for a bug found by the syzkaller tester */
void iteration_test(unsigned order, unsigned test_duration)
{
	int i;

	printv(1, "Running %siteration tests for %d seconds\n",
			order > 0 ? "multiorder " : "", test_duration);

	max_order = order;
	test_complete = false;

	for (i = 0; i < 3; i++)
		seeds[i] = rand();

	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
		perror("create tagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
		perror("create untagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
		perror("create add entry thread");
		exit(1);
	}
	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
		perror("create remove entry thread");
		exit(1);
	}
	if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) {
		perror("create tag entry thread");
		exit(1);
	}

	sleep(test_duration);
	test_complete = true;

	for (i = 0; i < NUM_THREADS; i++) {
		if (pthread_join(threads[i], NULL)) {
			perror("pthread_join");
			exit(1);
		}
	}

	item_kill_tree(&tree);
}