1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 STRATO AG
4 * written by Arne Jansen <sensille@gmx.net>
5 */
6
7#include <linux/slab.h>
8#include "messages.h"
9#include "ulist.h"
10#include "ctree.h"
11
12/*
13 * ulist is a generic data structure to hold a collection of unique u64
14 * values. The only operations it supports is adding to the list and
15 * enumerating it.
16 * It is possible to store an auxiliary value along with the key.
17 *
18 * A sample usage for ulists is the enumeration of directed graphs without
19 * visiting a node twice. The pseudo-code could look like this:
20 *
21 * ulist = ulist_alloc();
22 * ulist_add(ulist, root);
23 * ULIST_ITER_INIT(&uiter);
24 *
 * while ((elem = ulist_next(ulist, &uiter))) {
26 * for (all child nodes n in elem)
27 * ulist_add(ulist, n);
28 * do something useful with the node;
29 * }
30 * ulist_free(ulist);
31 *
32 * This assumes the graph nodes are addressable by u64. This stems from the
33 * usage for tree enumeration in btrfs, where the logical addresses are
34 * 64 bit.
35 *
36 * It is also useful for tree enumeration which could be done elegantly
37 * recursively, but is not possible due to kernel stack limitations. The
38 * loop would be similar to the above.
39 */
40
41/*
42 * Freshly initialize a ulist.
43 *
44 * @ulist: the ulist to initialize
45 *
46 * Note: don't use this function to init an already used ulist, use
47 * ulist_reinit instead.
48 */
49void ulist_init(struct ulist *ulist)
50{
51 INIT_LIST_HEAD(&ulist->nodes);
52 ulist->root = RB_ROOT;
53 ulist->nnodes = 0;
54}
55
56/*
57 * Free up additionally allocated memory for the ulist.
58 *
59 * @ulist: the ulist from which to free the additional memory
60 *
61 * This is useful in cases where the base 'struct ulist' has been statically
62 * allocated.
63 */
64void ulist_release(struct ulist *ulist)
65{
66 struct ulist_node *node;
67 struct ulist_node *next;
68
69 list_for_each_entry_safe(node, next, &ulist->nodes, list) {
70 kfree(node);
71 }
72 ulist->root = RB_ROOT;
73 INIT_LIST_HEAD(&ulist->nodes);
74}
75
/*
 * Prepare a ulist for reuse.
 *
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist. Equivalent to ulist_release() followed by ulist_init(), which
 * is the only safe way to re-initialize an already used ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}
89
90/*
91 * Dynamically allocate a ulist.
92 *
93 * @gfp_mask: allocation flags to for base allocation
94 *
95 * The allocated ulist will be returned in an initialized state.
96 */
97struct ulist *ulist_alloc(gfp_t gfp_mask)
98{
99 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
100
101 if (!ulist)
102 return NULL;
103
104 ulist_init(ulist);
105
106 return ulist;
107}
108
/*
 * Free dynamically allocated ulist.
 *
 * @ulist: ulist to free, may be NULL (then this is a no-op)
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (ulist) {
		ulist_release(ulist);
		kfree(ulist);
	}
}
123
124static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
125{
126 struct rb_node *n = ulist->root.rb_node;
127 struct ulist_node *u = NULL;
128
129 while (n) {
130 u = rb_entry(n, struct ulist_node, rb_node);
131 if (u->val < val)
132 n = n->rb_right;
133 else if (u->val > val)
134 n = n->rb_left;
135 else
136 return u;
137 }
138 return NULL;
139}
140
141static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
142{
143 rb_erase(&node->rb_node, &ulist->root);
144 list_del(&node->list);
145 kfree(node);
146 BUG_ON(ulist->nnodes == 0);
147 ulist->nnodes--;
148}
149
150static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
151{
152 struct rb_node **p = &ulist->root.rb_node;
153 struct rb_node *parent = NULL;
154 struct ulist_node *cur = NULL;
155
156 while (*p) {
157 parent = *p;
158 cur = rb_entry(parent, struct ulist_node, rb_node);
159
160 if (cur->val < ins->val)
161 p = &(*p)->rb_right;
162 else if (cur->val > ins->val)
163 p = &(*p)->rb_left;
164 else
165 return -EEXIST;
166 }
167 rb_link_node(&ins->rb_node, parent, p);
168 rb_insert_color(&ins->rb_node, &ulist->root);
169 return 0;
170}
171
/*
 * Add an element to the ulist.
 *
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 * locking is needed
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	/* NULL old_aux: the caller does not care about an existing aux value. */
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}
197
198int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
199 u64 *old_aux, gfp_t gfp_mask)
200{
201 int ret;
202 struct ulist_node *node;
203
204 node = ulist_rbtree_search(ulist, val);
205 if (node) {
206 if (old_aux)
207 *old_aux = node->aux;
208 return 0;
209 }
210 node = kmalloc(sizeof(*node), gfp_mask);
211 if (!node)
212 return -ENOMEM;
213
214 node->val = val;
215 node->aux = aux;
216
217 ret = ulist_rbtree_insert(ulist, node);
218 ASSERT(!ret);
219 list_add_tail(&node->list, &ulist->nodes);
220 ulist->nnodes++;
221
222 return 1;
223}
224
225/*
226 * Delete one node from ulist.
227 *
228 * @ulist: ulist to remove node from
229 * @val: value to delete
230 * @aux: aux to delete
231 *
232 * The deletion will only be done when *BOTH* val and aux matches.
233 * Return 0 for successful delete.
234 * Return > 0 for not found.
235 */
236int ulist_del(struct ulist *ulist, u64 val, u64 aux)
237{
238 struct ulist_node *node;
239
240 node = ulist_rbtree_search(ulist, val);
241 /* Not found */
242 if (!node)
243 return 1;
244
245 if (node->aux != aux)
246 return 1;
247
248 /* Found and delete */
249 ulist_rbtree_erase(ulist, node);
250 return 0;
251}
252
253/*
254 * Iterate ulist.
255 *
256 * @ulist: ulist to iterate
257 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
258 *
259 * Note: locking must be provided by the caller. In case of rwlocks only read
260 * locking is needed
261 *
262 * This function is used to iterate an ulist.
263 * It returns the next element from the ulist or %NULL when the
264 * end is reached. No guarantee is made with respect to the order in which
265 * the elements are returned. They might neither be returned in order of
266 * addition nor in ascending order.
267 * It is allowed to call ulist_add during an enumeration. Newly added items
268 * are guaranteed to show up in the running enumeration.
269 */
270struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
271{
272 struct ulist_node *node;
273
274 if (list_empty(&ulist->nodes))
275 return NULL;
276 if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
277 return NULL;
278 if (uiter->cur_list) {
279 uiter->cur_list = uiter->cur_list->next;
280 } else {
281 uiter->cur_list = ulist->nodes.next;
282 }
283 node = list_entry(uiter->cur_list, struct ulist_node, list);
284 return node;
285}
1/*
2 * Copyright (C) 2011 STRATO AG
3 * written by Arne Jansen <sensille@gmx.net>
4 * Distributed under the GNU GPL license version 2.
5 */
6
7#include <linux/slab.h>
8#include <linux/module.h>
9#include "ulist.h"
10
11/*
12 * ulist is a generic data structure to hold a collection of unique u64
13 * values. The only operations it supports is adding to the list and
14 * enumerating it.
15 * It is possible to store an auxiliary value along with the key.
16 *
17 * The implementation is preliminary and can probably be sped up
18 * significantly. A first step would be to store the values in an rbtree
19 * as soon as ULIST_SIZE is exceeded.
20 *
21 * A sample usage for ulists is the enumeration of directed graphs without
22 * visiting a node twice. The pseudo-code could look like this:
23 *
24 * ulist = ulist_alloc();
25 * ulist_add(ulist, root);
26 * ULIST_ITER_INIT(&uiter);
27 *
 * while ((elem = ulist_next(ulist, &uiter))) {
29 * for (all child nodes n in elem)
30 * ulist_add(ulist, n);
31 * do something useful with the node;
32 * }
33 * ulist_free(ulist);
34 *
 * This assumes the graph nodes are addressable by u64. This stems from the
36 * usage for tree enumeration in btrfs, where the logical addresses are
37 * 64 bit.
38 *
39 * It is also useful for tree enumeration which could be done elegantly
40 * recursively, but is not possible due to kernel stack limitations. The
41 * loop would be similar to the above.
42 */
43
44/**
45 * ulist_init - freshly initialize a ulist
46 * @ulist: the ulist to initialize
47 *
48 * Note: don't use this function to init an already used ulist, use
49 * ulist_reinit instead.
50 */
51void ulist_init(struct ulist *ulist)
52{
53 ulist->nnodes = 0;
54 ulist->nodes = ulist->int_nodes;
55 ulist->nodes_alloced = ULIST_SIZE;
56}
57EXPORT_SYMBOL(ulist_init);
58
59/**
60 * ulist_fini - free up additionally allocated memory for the ulist
61 * @ulist: the ulist from which to free the additional memory
62 *
63 * This is useful in cases where the base 'struct ulist' has been statically
64 * allocated.
65 */
66void ulist_fini(struct ulist *ulist)
67{
68 /*
69 * The first ULIST_SIZE elements are stored inline in struct ulist.
70 * Only if more elements are alocated they need to be freed.
71 */
72 if (ulist->nodes_alloced > ULIST_SIZE)
73 kfree(ulist->nodes);
74 ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
75}
76EXPORT_SYMBOL(ulist_fini);
77
/**
 * ulist_reinit - prepare a ulist for reuse
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist. Equivalent to ulist_fini() followed by ulist_init(), which is
 * the only safe way to re-initialize an already used ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_fini(ulist);
	ulist_init(ulist);
}
EXPORT_SYMBOL(ulist_reinit);
91
92/**
93 * ulist_alloc - dynamically allocate a ulist
94 * @gfp_mask: allocation flags to for base allocation
95 *
96 * The allocated ulist will be returned in an initialized state.
97 */
98struct ulist *ulist_alloc(gfp_t gfp_mask)
99{
100 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
101
102 if (!ulist)
103 return NULL;
104
105 ulist_init(ulist);
106
107 return ulist;
108}
109EXPORT_SYMBOL(ulist_alloc);
110
/**
 * ulist_free - free dynamically allocated ulist
 * @ulist: ulist to free, may be NULL (then this is a no-op)
 *
 * It is not necessary to call ulist_fini before.
 */
void ulist_free(struct ulist *ulist)
{
	if (ulist) {
		ulist_fini(ulist);
		kfree(ulist);
	}
}
EXPORT_SYMBOL(ulist_free);
125
/**
 * ulist_add - add an element to the ulist
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 * locking is needed
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
	      gfp_t gfp_mask)
{
	/* NULL old_aux: the caller does not care about an existing aux value. */
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}
151
152int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
153 unsigned long *old_aux, gfp_t gfp_mask)
154{
155 int i;
156
157 for (i = 0; i < ulist->nnodes; ++i) {
158 if (ulist->nodes[i].val == val) {
159 if (old_aux)
160 *old_aux = ulist->nodes[i].aux;
161 return 0;
162 }
163 }
164
165 if (ulist->nnodes >= ulist->nodes_alloced) {
166 u64 new_alloced = ulist->nodes_alloced + 128;
167 struct ulist_node *new_nodes;
168 void *old = NULL;
169
170 /*
171 * if nodes_alloced == ULIST_SIZE no memory has been allocated
172 * yet, so pass NULL to krealloc
173 */
174 if (ulist->nodes_alloced > ULIST_SIZE)
175 old = ulist->nodes;
176
177 new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
178 gfp_mask);
179 if (!new_nodes)
180 return -ENOMEM;
181
182 if (!old)
183 memcpy(new_nodes, ulist->int_nodes,
184 sizeof(ulist->int_nodes));
185
186 ulist->nodes = new_nodes;
187 ulist->nodes_alloced = new_alloced;
188 }
189 ulist->nodes[ulist->nnodes].val = val;
190 ulist->nodes[ulist->nnodes].aux = aux;
191 ++ulist->nnodes;
192
193 return 1;
194}
195EXPORT_SYMBOL(ulist_add);
196
197/**
198 * ulist_next - iterate ulist
199 * @ulist: ulist to iterate
200 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
201 *
202 * Note: locking must be provided by the caller. In case of rwlocks only read
203 * locking is needed
204 *
205 * This function is used to iterate an ulist.
206 * It returns the next element from the ulist or %NULL when the
207 * end is reached. No guarantee is made with respect to the order in which
208 * the elements are returned. They might neither be returned in order of
209 * addition nor in ascending order.
210 * It is allowed to call ulist_add during an enumeration. Newly added items
211 * are guaranteed to show up in the running enumeration.
212 */
213struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
214{
215 if (ulist->nnodes == 0)
216 return NULL;
217 if (uiter->i < 0 || uiter->i >= ulist->nnodes)
218 return NULL;
219
220 return &ulist->nodes[uiter->i++];
221}
222EXPORT_SYMBOL(ulist_next);