// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use hash of data as a key and provide a value that may represent a
 * block or inode number. That's why keys need not be unique (hash of different
 * data may be the same). However user provided value always uniquely
 * identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. Fixed
 * size hash table is used for fast key lookups.
 */
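
/*
 * Editorial illustration, not part of the upstream file: a sketch of how a
 * filesystem might drive this API when deduplicating xattr blocks. The key is
 * assumed to be a hash of the block contents and the value its block number;
 * the names "hash" and "block" are hypothetical.
 */
static __maybe_unused int mb_cache_usage_sketch(struct mb_cache *cache,
                                                u32 hash, u64 block)
{
        struct mb_cache_entry *ce;
        int err;

        /*
         * Walk all reusable entries sharing this hash. Each find_next() call
         * drops the reference to the previous entry and returns the next one
         * with a reference held, so a full walk needs no explicit put.
         */
        for (ce = mb_cache_entry_find_first(cache, hash); ce;
             ce = mb_cache_entry_find_next(cache, ce)) {
                /* ... compare the on-disk data identified by ce->e_value ... */
                mb_cache_entry_touch(cache, ce);
        }

        /* Remember the (hash, block) pair; -EBUSY means it is already cached. */
        err = mb_cache_entry_create(cache, GFP_NOFS, hash, block, true);
        return err == -EBUSY ? 0 : err;
}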

struct mb_cache {
        /* Hash table of entries */
        struct hlist_bl_head *c_hash;
        /* log2 of hash table size */
        int c_bucket_bits;
        /* Maximum entries in cache to avoid degrading hash too much */
        unsigned long c_max_entries;
        /* Protects c_list, c_entry_count */
        spinlock_t c_list_lock;
        struct list_head c_list;
        /* Number of entries in cache */
        unsigned long c_entry_count;
        struct shrinker *c_shrink;
        /* Work for shrinking when the cache has too many entries */
        struct work_struct c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
                                                        u32 key)
{
        return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function returns
 * -EBUSY if entry with the same key and value already exists in cache.
 * Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
                          u64 value, bool reusable)
{
        struct mb_cache_entry *entry, *dup;
        struct hlist_bl_node *dup_node;
        struct hlist_bl_head *head;

        /* Schedule background reclaim if there are too many entries */
        if (cache->c_entry_count >= cache->c_max_entries)
                schedule_work(&cache->c_shrink_work);
        /* Do some sync reclaim if background reclaim cannot keep up */
        if (cache->c_entry_count >= 2*cache->c_max_entries)
                mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

        entry = kmem_cache_alloc(mb_entry_cache, mask);
        if (!entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&entry->e_list);
        /*
         * We create entry with two references. One reference is kept by the
         * hash table, the other reference is used to protect us from
         * mb_cache_entry_delete_or_get() until the entry is fully setup. This
         * avoids nesting of cache->c_list_lock into hash table bit locks which
         * is problematic for RT.
         */
        atomic_set(&entry->e_refcnt, 2);
        entry->e_key = key;
        entry->e_value = value;
        entry->e_flags = 0;
        if (reusable)
                set_bit(MBE_REUSABLE_B, &entry->e_flags);
        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                if (dup->e_key == key && dup->e_value == value) {
                        hlist_bl_unlock(head);
                        kmem_cache_free(mb_entry_cache, entry);
                        return -EBUSY;
                }
        }
        hlist_bl_add_head(&entry->e_hash_list, head);
        hlist_bl_unlock(head);
        spin_lock(&cache->c_list_lock);
        list_add_tail(&entry->e_list, &cache->c_list);
        cache->c_entry_count++;
        spin_unlock(&cache->c_list_lock);
        mb_cache_entry_put(cache, entry);

        return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
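
/*
 * Editorial note on the reference counting above (assuming no concurrent
 * users): the entry starts with e_refcnt == 2, the hash and LRU insertion
 * happen while we still hold our "setup" reference, and the final
 * mb_cache_entry_put() leaves e_refcnt == 1, held by the hash table alone.
 * Lookups later take extra references with atomic_inc_not_zero(), so a
 * refcount of 1 always means "cached but unused".
 */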

void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
        struct hlist_bl_head *head;

        head = mb_cache_entry_head(cache, entry->e_key);
        hlist_bl_lock(head);
        hlist_bl_del(&entry->e_hash_list);
        hlist_bl_unlock(head);
        kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Wait to be the last user of the entry.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
        wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
                                           struct mb_cache_entry *entry,
                                           u32 key)
{
        struct mb_cache_entry *old_entry = entry;
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                node = entry->e_hash_list.next;
        else
                node = hlist_bl_first(head);
        while (node) {
                entry = hlist_bl_entry(node, struct mb_cache_entry,
                                       e_hash_list);
                if (entry->e_key == key &&
                    test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
                    atomic_inc_not_zero(&entry->e_refcnt))
                        goto out;
                node = node->next;
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        if (old_entry)
                mb_cache_entry_put(cache, old_entry);

        return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
                                                 u32 key)
{
        return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
                                                struct mb_cache_entry *entry)
{
        return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
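
/*
 * Editorial illustration, not part of the upstream file: a caller that stops
 * the find_first()/find_next() walk early still holds the reference taken for
 * the current entry and must eventually drop it with mb_cache_entry_put().
 * The predicate entry_matches() is hypothetical.
 */
static __maybe_unused struct mb_cache_entry *
mb_cache_find_match_sketch(struct mb_cache *cache, u32 key,
                           bool (*entry_matches)(struct mb_cache_entry *))
{
        struct mb_cache_entry *ce;

        for (ce = mb_cache_entry_find_first(cache, key); ce;
             ce = mb_cache_entry_find_next(cache, ce)) {
                if (entry_matches(ce))
                        return ce;      /* returned with a reference held */
        }
        return NULL;    /* full walk: the last reference was already dropped */
}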

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
                                          u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value &&
                    atomic_inc_not_zero(&entry->e_refcnt))
                        goto out;
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no entry in cache. Otherwise the
 * function grabs reference of the entry that we failed to delete because it
 * still has users and return it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
                                                    u32 key, u64 value)
{
        struct mb_cache_entry *entry;

        entry = mb_cache_entry_get(cache, key, value);
        if (!entry)
                return NULL;

        /*
         * Drop the ref we got from mb_cache_entry_get() and the initial hash
         * ref if we are the last user
         */
        if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
                return entry;

        spin_lock(&cache->c_list_lock);
        if (!list_empty(&entry->e_list))
                list_del_init(&entry->e_list);
        cache->c_entry_count--;
        spin_unlock(&cache->c_list_lock);
        __mb_cache_entry_free(cache, entry);
        return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
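
/*
 * Editorial illustration, not part of the upstream file: the intended pairing
 * of mb_cache_entry_delete_or_get() with mb_cache_entry_wait_unused() when the
 * object behind @value is about to go away. If the entry still has users, we
 * wait until we are the last holder, drop our reference, and retry the removal.
 */
static __maybe_unused void mb_cache_forget_sketch(struct mb_cache *cache,
                                                  u32 key, u64 value)
{
        struct mb_cache_entry *entry;

        /* Loop until the entry is gone: either we removed it or someone else did. */
        while ((entry = mb_cache_entry_delete_or_get(cache, key, value)) != NULL) {
                /* Entry still has users: wait until only we and the hash hold it. */
                mb_cache_entry_wait_unused(entry);
                /* Drop our reference and try the removal again. */
                mb_cache_entry_put(cache, entry);
        }
}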

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give hit higher chances of surviving in cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
                          struct mb_cache_entry *entry)
{
        set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct mb_cache *cache = shrink->private_data;

        return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan)
{
        struct mb_cache_entry *entry;
        unsigned long shrunk = 0;

        spin_lock(&cache->c_list_lock);
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
                /* Drop initial hash reference if there is no user */
                if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
                    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
                        clear_bit(MBE_REFERENCED_B, &entry->e_flags);
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                list_del_init(&entry->e_list);
                cache->c_entry_count--;
                spin_unlock(&cache->c_list_lock);
                __mb_cache_entry_free(cache, entry);
                shrunk++;
                cond_resched();
                spin_lock(&cache->c_list_lock);
        }
        spin_unlock(&cache->c_list_lock);

        return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct mb_cache *cache = shrink->private_data;
        return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
        struct mb_cache *cache = container_of(work, struct mb_cache,
                                              c_shrink_work);
        mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}
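
/*
 * Editorial note: with the sizing set up in mb_cache_create() below, a cache
 * created with bucket_bits = 10 has c_max_entries = 1024 << 4 = 16384, so each
 * run of this worker frees at most 16384 / 16 = 1024 entries, while callers of
 * mb_cache_entry_create() additionally reclaim SYNC_SHRINK_BATCH (64) entries
 * synchronously once the entry count reaches twice c_max_entries.
 */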

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
        struct mb_cache *cache;
        unsigned long bucket_count = 1UL << bucket_bits;
        unsigned long i;

        cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                goto err_out;
        cache->c_bucket_bits = bucket_bits;
        cache->c_max_entries = bucket_count << 4;
        INIT_LIST_HEAD(&cache->c_list);
        spin_lock_init(&cache->c_list_lock);
        cache->c_hash = kmalloc_array(bucket_count,
                                      sizeof(struct hlist_bl_head),
                                      GFP_KERNEL);
        if (!cache->c_hash) {
                kfree(cache);
                goto err_out;
        }
        for (i = 0; i < bucket_count; i++)
                INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

        cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
        if (!cache->c_shrink) {
                kfree(cache->c_hash);
                kfree(cache);
                goto err_out;
        }

        cache->c_shrink->count_objects = mb_cache_count;
        cache->c_shrink->scan_objects = mb_cache_scan;
        cache->c_shrink->private_data = cache;

        shrinker_register(cache->c_shrink);

        INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

        return cache;

err_out:
        return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
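
/*
 * Editorial illustration, not part of the upstream file: typical lifecycle of
 * a cache. The value 10 for bucket_bits is only an example.
 */
static __maybe_unused struct mb_cache *mb_cache_lifecycle_sketch(void)
{
        /* 2^10 = 1024 hash buckets; NULL on allocation failure. */
        struct mb_cache *cache = mb_cache_create(10);

        if (!cache)
                return NULL;

        /*
         * ... entries are created and looked up here; once nobody can reach
         * the cache any more, mb_cache_destroy(cache) frees all remaining
         * entries and the cache itself ...
         */
        return cache;
}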

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
        struct mb_cache_entry *entry, *next;

        shrinker_free(cache->c_shrink);

        /*
         * We don't bother with any locking. Cache must not be used at this
         * point.
         */
        list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
                list_del(&entry->e_list);
                WARN_ON(atomic_read(&entry->e_refcnt) != 1);
                mb_cache_entry_put(cache, entry);
        }
        kfree(cache->c_hash);
        kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
        mb_entry_cache = KMEM_CACHE(mb_cache_entry, SLAB_RECLAIM_ACCOUNT);
        if (!mb_entry_cache)
                return -ENOMEM;
        return 0;
}

static void __exit mbcache_exit(void)
{
        kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");