v4.10.11
  1#include <linux/spinlock.h>
  2#include <linux/slab.h>
  3#include <linux/list.h>
  4#include <linux/list_bl.h>
  5#include <linux/module.h>
  6#include <linux/sched.h>
  7#include <linux/workqueue.h>
  8#include <linux/mbcache.h>
  9
 10/*
 11 * Mbcache is a simple key-value store. Keys need not be unique, however
 12 * key-value pairs are expected to be unique (we use this fact in
 13 * mb_cache_entry_delete_block()).
 14 *
 15 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 16 * They use hash of a block contents as a key and block number as a value.
 17 * That's why keys need not be unique (different xattr blocks may end up having
 18 * the same hash). However block number always uniquely identifies a cache
 19 * entry.
 20 *
 21 * We provide functions for creation and removal of entries, search by key,
 22 * and a special "delete entry with given key-value pair" operation. Fixed
 23 * size hash table is used for fast key lookups.
 24 */
 25
 26struct mb_cache {
 27	/* Hash table of entries */
 28	struct hlist_bl_head	*c_hash;
 29	/* log2 of hash table size */
 30	int			c_bucket_bits;
 31	/* Maximum entries in cache to avoid degrading hash too much */
 32	unsigned long		c_max_entries;
 33	/* Protects c_list, c_entry_count */
 34	spinlock_t		c_list_lock;
 35	struct list_head	c_list;
 36	/* Number of entries in cache */
 37	unsigned long		c_entry_count;
 38	struct shrinker		c_shrink;
 39	/* Work for shrinking when the cache has too many entries */
 40	struct work_struct	c_shrink_work;
 41};
 42
 43static struct kmem_cache *mb_entry_cache;
 44
 45static unsigned long mb_cache_shrink(struct mb_cache *cache,
 46				     unsigned long nr_to_scan);
 47
 48static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
 49							u32 key)
 50{
 51	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 52}
 53
 54/*
 55 * Number of entries to reclaim synchronously when there are too many entries
 56 * in cache
 57 */
 58#define SYNC_SHRINK_BATCH 64
 59
 60/*
 61 * mb_cache_entry_create - create entry in cache
 62 * @cache - cache where the entry should be created
 63 * @mask - gfp mask with which the entry should be allocated
 64 * @key - key of the entry
 65 * @block - block that contains data
 66 * @reusable - is the block reusable by other inodes?
 67 *
 68 * Creates entry in @cache with key @key and records that data is stored in
 69 * block @block. The function returns -EBUSY if entry with the same key
 70 * and for the same block already exists in cache. Otherwise 0 is returned.
 71 */
 72int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 73			  sector_t block, bool reusable)
 74{
 75	struct mb_cache_entry *entry, *dup;
 76	struct hlist_bl_node *dup_node;
 77	struct hlist_bl_head *head;
 78
 79	/* Schedule background reclaim if there are too many entries */
 80	if (cache->c_entry_count >= cache->c_max_entries)
 81		schedule_work(&cache->c_shrink_work);
 82	/* Do some sync reclaim if background reclaim cannot keep up */
 83	if (cache->c_entry_count >= 2*cache->c_max_entries)
 84		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
 85
 86	entry = kmem_cache_alloc(mb_entry_cache, mask);
 87	if (!entry)
 88		return -ENOMEM;
 89
 90	INIT_LIST_HEAD(&entry->e_list);
 91	/* One ref for hash, one ref returned */
 92	atomic_set(&entry->e_refcnt, 1);
 93	entry->e_key = key;
 94	entry->e_block = block;
 95	entry->e_reusable = reusable;
 96	head = mb_cache_entry_head(cache, key);
 97	hlist_bl_lock(head);
 98	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
 99		if (dup->e_key == key && dup->e_block == block) {
100			hlist_bl_unlock(head);
101			kmem_cache_free(mb_entry_cache, entry);
102			return -EBUSY;
103		}
104	}
105	hlist_bl_add_head(&entry->e_hash_list, head);
106	hlist_bl_unlock(head);
107
108	spin_lock(&cache->c_list_lock);
109	list_add_tail(&entry->e_list, &cache->c_list);
110	/* Grab ref for LRU list */
111	atomic_inc(&entry->e_refcnt);
112	cache->c_entry_count++;
113	spin_unlock(&cache->c_list_lock);
114
115	return 0;
116}
117EXPORT_SYMBOL(mb_cache_entry_create);
118
119void __mb_cache_entry_free(struct mb_cache_entry *entry)
120{
121	kmem_cache_free(mb_entry_cache, entry);
122}
123EXPORT_SYMBOL(__mb_cache_entry_free);
124
125static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
126					   struct mb_cache_entry *entry,
127					   u32 key)
128{
129	struct mb_cache_entry *old_entry = entry;
130	struct hlist_bl_node *node;
131	struct hlist_bl_head *head;
132
133	head = mb_cache_entry_head(cache, key);
134	hlist_bl_lock(head);
135	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
136		node = entry->e_hash_list.next;
137	else
138		node = hlist_bl_first(head);
139	while (node) {
140		entry = hlist_bl_entry(node, struct mb_cache_entry,
141				       e_hash_list);
142		if (entry->e_key == key && entry->e_reusable) {
143			atomic_inc(&entry->e_refcnt);
144			goto out;
145		}
146		node = node->next;
147	}
148	entry = NULL;
149out:
150	hlist_bl_unlock(head);
151	if (old_entry)
152		mb_cache_entry_put(cache, old_entry);
153
154	return entry;
155}
156
157/*
158 * mb_cache_entry_find_first - find the first reusable entry with the given key
159 * @cache: cache where we should search
160 * @key: key to look for
161 *
162 * Search in @cache for a reusable entry with key @key. Grabs reference to the
163 * first reusable entry found and returns the entry.
164 */
165struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
166						 u32 key)
167{
168	return __entry_find(cache, NULL, key);
169}
170EXPORT_SYMBOL(mb_cache_entry_find_first);
171
172/*
173 * mb_cache_entry_find_next - find next reusable entry with the same key
174 * @cache: cache where we should search
175 * @entry: entry to start search from
176 *
177 * Finds next reusable entry in the hash chain which has the same key as @entry.
178 * If @entry is unhashed (which can happen when deletion of entry races with the
179 * search), finds the first reusable entry in the hash chain. The function drops
180 * reference to @entry and returns with a reference to the found entry.
181 */
182struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
183						struct mb_cache_entry *entry)
184{
185	return __entry_find(cache, entry, entry->e_key);
186}
187EXPORT_SYMBOL(mb_cache_entry_find_next);
188
189/*
190 * mb_cache_entry_get - get a cache entry by block number (and key)
191 * @cache - cache we work with
192 * @key - key of block number @block
193 * @block - block number
194 */
195struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
196					  sector_t block)
197{
198	struct hlist_bl_node *node;
199	struct hlist_bl_head *head;
200	struct mb_cache_entry *entry;
201
202	head = mb_cache_entry_head(cache, key);
203	hlist_bl_lock(head);
204	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
205		if (entry->e_key == key && entry->e_block == block) {
206			atomic_inc(&entry->e_refcnt);
207			goto out;
208		}
209	}
210	entry = NULL;
211out:
212	hlist_bl_unlock(head);
213	return entry;
214}
215EXPORT_SYMBOL(mb_cache_entry_get);
216
217/* mb_cache_entry_delete_block - remove information about block from cache
218 * @cache - cache we work with
219 * @key - key of block @block
220 * @block - block number
221 *
222 * Remove entry from cache @cache with key @key with data stored in @block.
223 */
224void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
225				 sector_t block)
226{
227	struct hlist_bl_node *node;
228	struct hlist_bl_head *head;
229	struct mb_cache_entry *entry;
230
231	head = mb_cache_entry_head(cache, key);
232	hlist_bl_lock(head);
233	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
234		if (entry->e_key == key && entry->e_block == block) {
235			/* We keep hash list reference to keep entry alive */
236			hlist_bl_del_init(&entry->e_hash_list);
237			hlist_bl_unlock(head);
238			spin_lock(&cache->c_list_lock);
239			if (!list_empty(&entry->e_list)) {
240				list_del_init(&entry->e_list);
241				cache->c_entry_count--;
242				atomic_dec(&entry->e_refcnt);
243			}
244			spin_unlock(&cache->c_list_lock);
245			mb_cache_entry_put(cache, entry);
246			return;
247		}
248	}
249	hlist_bl_unlock(head);
250}
251EXPORT_SYMBOL(mb_cache_entry_delete_block);
252
253/* mb_cache_entry_touch - cache entry got used
254 * @cache - cache the entry belongs to
255 * @entry - entry that got used
256 *
257 * Marks entry as used to give hit higher chances of surviving in cache.
258 */
259void mb_cache_entry_touch(struct mb_cache *cache,
260			  struct mb_cache_entry *entry)
261{
262	entry->e_referenced = 1;
263}
264EXPORT_SYMBOL(mb_cache_entry_touch);
265
266static unsigned long mb_cache_count(struct shrinker *shrink,
267				    struct shrink_control *sc)
268{
269	struct mb_cache *cache = container_of(shrink, struct mb_cache,
270					      c_shrink);
271
272	return cache->c_entry_count;
273}
274
275/* Shrink number of entries in cache */
276static unsigned long mb_cache_shrink(struct mb_cache *cache,
277				     unsigned long nr_to_scan)
278{
279	struct mb_cache_entry *entry;
280	struct hlist_bl_head *head;
281	unsigned long shrunk = 0;
282
283	spin_lock(&cache->c_list_lock);
284	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
285		entry = list_first_entry(&cache->c_list,
286					 struct mb_cache_entry, e_list);
287		if (entry->e_referenced) {
288			entry->e_referenced = 0;
289			list_move_tail(&entry->e_list, &cache->c_list);
290			continue;
291		}
292		list_del_init(&entry->e_list);
293		cache->c_entry_count--;
294		/*
295		 * We keep LRU list reference so that entry doesn't go away
296		 * from under us.
297		 */
298		spin_unlock(&cache->c_list_lock);
299		head = mb_cache_entry_head(cache, entry->e_key);
300		hlist_bl_lock(head);
301		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
302			hlist_bl_del_init(&entry->e_hash_list);
303			atomic_dec(&entry->e_refcnt);
304		}
305		hlist_bl_unlock(head);
306		if (mb_cache_entry_put(cache, entry))
307			shrunk++;
308		cond_resched();
309		spin_lock(&cache->c_list_lock);
310	}
311	spin_unlock(&cache->c_list_lock);
312
313	return shrunk;
314}
315
316static unsigned long mb_cache_scan(struct shrinker *shrink,
317				   struct shrink_control *sc)
318{
319	struct mb_cache *cache = container_of(shrink, struct mb_cache,
320					      c_shrink);
321	return mb_cache_shrink(cache, sc->nr_to_scan);
322}
323
324/* We shrink 1/X of the cache when we have too many entries in it */
325#define SHRINK_DIVISOR 16
326
327static void mb_cache_shrink_worker(struct work_struct *work)
328{
329	struct mb_cache *cache = container_of(work, struct mb_cache,
330					      c_shrink_work);
331	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
332}
333
334/*
335 * mb_cache_create - create cache
336 * @bucket_bits: log2 of the hash table size
337 *
338 * Create cache for keys with 2^bucket_bits hash entries.
339 */
340struct mb_cache *mb_cache_create(int bucket_bits)
341{
342	struct mb_cache *cache;
343	unsigned long bucket_count = 1UL << bucket_bits;
344	unsigned long i;
345
346	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
347	if (!cache)
348		goto err_out;
349	cache->c_bucket_bits = bucket_bits;
350	cache->c_max_entries = bucket_count << 4;
351	INIT_LIST_HEAD(&cache->c_list);
352	spin_lock_init(&cache->c_list_lock);
353	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
354				GFP_KERNEL);
355	if (!cache->c_hash) {
356		kfree(cache);
357		goto err_out;
358	}
359	for (i = 0; i < bucket_count; i++)
360		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
361
362	cache->c_shrink.count_objects = mb_cache_count;
363	cache->c_shrink.scan_objects = mb_cache_scan;
364	cache->c_shrink.seeks = DEFAULT_SEEKS;
365	if (register_shrinker(&cache->c_shrink)) {
366		kfree(cache->c_hash);
367		kfree(cache);
368		goto err_out;
369	}
370
371	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
372
373	return cache;
374
375err_out:
376	return NULL;
377}
378EXPORT_SYMBOL(mb_cache_create);
379
380/*
381 * mb_cache_destroy - destroy cache
382 * @cache: the cache to destroy
383 *
384 * Free all entries in cache and cache itself. Caller must make sure nobody
385 * (except shrinker) can reach @cache when calling this.
386 */
387void mb_cache_destroy(struct mb_cache *cache)
388{
389	struct mb_cache_entry *entry, *next;
390
391	unregister_shrinker(&cache->c_shrink);
392
393	/*
394	 * We don't bother with any locking. Cache must not be used at this
395	 * point.
396	 */
397	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
398		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
399			hlist_bl_del_init(&entry->e_hash_list);
400			atomic_dec(&entry->e_refcnt);
401		} else
402			WARN_ON(1);
403		list_del(&entry->e_list);
404		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
405		mb_cache_entry_put(cache, entry);
406	}
407	kfree(cache->c_hash);
408	kfree(cache);
409}
410EXPORT_SYMBOL(mb_cache_destroy);
411
412static int __init mbcache_init(void)
413{
414	mb_entry_cache = kmem_cache_create("mbcache",
415				sizeof(struct mb_cache_entry), 0,
416				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
417	if (!mb_entry_cache)
418		return -ENOMEM;
419	return 0;
420}
421
422static void __exit mbcache_exit(void)
423{
424	kmem_cache_destroy(mb_entry_cache);
425}
426
427module_init(mbcache_init)
428module_exit(mbcache_exit)
429
430MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
431MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
432MODULE_LICENSE("GPL");
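
A minimal usage sketch of the v4.10.11 API listed above, loosely modeled on how ext2/ext4 deduplicate extended attribute blocks: the content hash of a block is the key, the block number is the value. All example_* names, the bucket count, and the GFP flag are illustrative assumptions, not code from the kernel tree.

#include <linux/mbcache.h>

static struct mb_cache *example_xattr_cache;

static int example_cache_init(void)
{
	/* 2^10 hash buckets; mb_cache_create() returns NULL on failure. */
	example_xattr_cache = mb_cache_create(10);
	return example_xattr_cache ? 0 : -ENOMEM;
}

/* Remember that @block holds xattr data whose content hash is @hash. */
static void example_remember_block(u32 hash, sector_t block)
{
	/* -EBUSY just means the same key/block pair is already cached. */
	mb_cache_entry_create(example_xattr_cache, GFP_NOFS, hash, block, true);
}

/* Look up a block that may already hold the same xattr data. */
static sector_t example_find_block(u32 hash)
{
	struct mb_cache_entry *ce;
	sector_t block = 0;

	ce = mb_cache_entry_find_first(example_xattr_cache, hash);
	if (ce) {
		/*
		 * A real caller re-reads the block and compares contents,
		 * moving on with mb_cache_entry_find_next() on a mismatch;
		 * this sketch simply takes the first candidate.
		 */
		block = ce->e_block;
		mb_cache_entry_touch(example_xattr_cache, ce);
		mb_cache_entry_put(example_xattr_cache, ce);
	}
	return block;
}

/* Drop the cached mapping when the block is freed. */
static void example_forget_block(u32 hash, sector_t block)
{
	mb_cache_entry_delete_block(example_xattr_cache, hash, block);
}

static void example_cache_exit(void)
{
	mb_cache_destroy(example_xattr_cache);
}

Note that the cache never guarantees a hit is still valid: the caller re-verifies the block contents and falls back to allocating a fresh block on mismatch, which is why duplicate keys are tolerated and only the key/block pair is unique.
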
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include <linux/spinlock.h>
  3#include <linux/slab.h>
  4#include <linux/list.h>
  5#include <linux/list_bl.h>
  6#include <linux/module.h>
  7#include <linux/sched.h>
  8#include <linux/workqueue.h>
  9#include <linux/mbcache.h>
 10
 11/*
 12 * Mbcache is a simple key-value store. Keys need not be unique, however
 13 * key-value pairs are expected to be unique (we use this fact in
 14 * mb_cache_entry_delete()).
 15 *
 16 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 17 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 18 * They use hash of data as a key and provide a value that may represent a
 19 * block or inode number. That's why keys need not be unique (hash of different
 20 * data may be the same). However user provided value always uniquely
 21 * identifies a cache entry.
 22 *
 23 * We provide functions for creation and removal of entries, search by key,
 24 * and a special "delete entry with given key-value pair" operation. Fixed
 25 * size hash table is used for fast key lookups.
 26 */
 27
 28struct mb_cache {
 29	/* Hash table of entries */
 30	struct hlist_bl_head	*c_hash;
 31	/* log2 of hash table size */
 32	int			c_bucket_bits;
 33	/* Maximum entries in cache to avoid degrading hash too much */
 34	unsigned long		c_max_entries;
 35	/* Protects c_list, c_entry_count */
 36	spinlock_t		c_list_lock;
 37	struct list_head	c_list;
 38	/* Number of entries in cache */
 39	unsigned long		c_entry_count;
 40	struct shrinker		c_shrink;
 41	/* Work for shrinking when the cache has too many entries */
 42	struct work_struct	c_shrink_work;
 43};
 44
 45static struct kmem_cache *mb_entry_cache;
 46
 47static unsigned long mb_cache_shrink(struct mb_cache *cache,
 48				     unsigned long nr_to_scan);
 49
 50static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
 51							u32 key)
 52{
 53	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 54}
 55
 56/*
 57 * Number of entries to reclaim synchronously when there are too many entries
 58 * in cache
 59 */
 60#define SYNC_SHRINK_BATCH 64
 61
 62/*
 63 * mb_cache_entry_create - create entry in cache
 64 * @cache - cache where the entry should be created
 65 * @mask - gfp mask with which the entry should be allocated
 66 * @key - key of the entry
 67 * @value - value of the entry
 68 * @reusable - is the entry reusable by others?
 69 *
 70 * Creates entry in @cache with key @key and value @value. The function returns
 71 * -EBUSY if entry with the same key and value already exists in cache.
 72 * Otherwise 0 is returned.
 73 */
 74int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 75			  u64 value, bool reusable)
 76{
 77	struct mb_cache_entry *entry, *dup;
 78	struct hlist_bl_node *dup_node;
 79	struct hlist_bl_head *head;
 80
 81	/* Schedule background reclaim if there are too many entries */
 82	if (cache->c_entry_count >= cache->c_max_entries)
 83		schedule_work(&cache->c_shrink_work);
 84	/* Do some sync reclaim if background reclaim cannot keep up */
 85	if (cache->c_entry_count >= 2*cache->c_max_entries)
 86		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
 87
 88	entry = kmem_cache_alloc(mb_entry_cache, mask);
 89	if (!entry)
 90		return -ENOMEM;
 91
 92	INIT_LIST_HEAD(&entry->e_list);
 93	/* One ref for hash, one ref returned */
 94	atomic_set(&entry->e_refcnt, 1);
 95	entry->e_key = key;
 96	entry->e_value = value;
 97	entry->e_reusable = reusable;
 98	entry->e_referenced = 0;
 99	head = mb_cache_entry_head(cache, key);
100	hlist_bl_lock(head);
101	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
102		if (dup->e_key == key && dup->e_value == value) {
103			hlist_bl_unlock(head);
104			kmem_cache_free(mb_entry_cache, entry);
105			return -EBUSY;
106		}
107	}
108	hlist_bl_add_head(&entry->e_hash_list, head);
109	hlist_bl_unlock(head);
110
111	spin_lock(&cache->c_list_lock);
112	list_add_tail(&entry->e_list, &cache->c_list);
113	/* Grab ref for LRU list */
114	atomic_inc(&entry->e_refcnt);
115	cache->c_entry_count++;
116	spin_unlock(&cache->c_list_lock);
117
118	return 0;
119}
120EXPORT_SYMBOL(mb_cache_entry_create);
121
122void __mb_cache_entry_free(struct mb_cache_entry *entry)
123{
124	kmem_cache_free(mb_entry_cache, entry);
125}
126EXPORT_SYMBOL(__mb_cache_entry_free);
127
128static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
129					   struct mb_cache_entry *entry,
130					   u32 key)
131{
132	struct mb_cache_entry *old_entry = entry;
133	struct hlist_bl_node *node;
134	struct hlist_bl_head *head;
135
136	head = mb_cache_entry_head(cache, key);
137	hlist_bl_lock(head);
138	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
139		node = entry->e_hash_list.next;
140	else
141		node = hlist_bl_first(head);
142	while (node) {
143		entry = hlist_bl_entry(node, struct mb_cache_entry,
144				       e_hash_list);
145		if (entry->e_key == key && entry->e_reusable) {
146			atomic_inc(&entry->e_refcnt);
147			goto out;
148		}
149		node = node->next;
150	}
151	entry = NULL;
152out:
153	hlist_bl_unlock(head);
154	if (old_entry)
155		mb_cache_entry_put(cache, old_entry);
156
157	return entry;
158}
159
160/*
161 * mb_cache_entry_find_first - find the first reusable entry with the given key
162 * @cache: cache where we should search
163 * @key: key to look for
164 *
165 * Search in @cache for a reusable entry with key @key. Grabs reference to the
166 * first reusable entry found and returns the entry.
167 */
168struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
169						 u32 key)
170{
171	return __entry_find(cache, NULL, key);
172}
173EXPORT_SYMBOL(mb_cache_entry_find_first);
174
175/*
176 * mb_cache_entry_find_next - find next reusable entry with the same key
177 * @cache: cache where we should search
178 * @entry: entry to start search from
179 *
180 * Finds next reusable entry in the hash chain which has the same key as @entry.
181 * If @entry is unhashed (which can happen when deletion of entry races with the
182 * search), finds the first reusable entry in the hash chain. The function drops
183 * reference to @entry and returns with a reference to the found entry.
184 */
185struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
186						struct mb_cache_entry *entry)
187{
188	return __entry_find(cache, entry, entry->e_key);
189}
190EXPORT_SYMBOL(mb_cache_entry_find_next);
191
192/*
193 * mb_cache_entry_get - get a cache entry by value (and key)
194 * @cache - cache we work with
195 * @key - key
196 * @value - value
197 */
198struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
199					  u64 value)
200{
201	struct hlist_bl_node *node;
202	struct hlist_bl_head *head;
203	struct mb_cache_entry *entry;
204
205	head = mb_cache_entry_head(cache, key);
206	hlist_bl_lock(head);
207	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
208		if (entry->e_key == key && entry->e_value == value) {
209			atomic_inc(&entry->e_refcnt);
210			goto out;
211		}
212	}
213	entry = NULL;
214out:
215	hlist_bl_unlock(head);
216	return entry;
217}
218EXPORT_SYMBOL(mb_cache_entry_get);
219
220/* mb_cache_entry_delete - remove a cache entry
221 * @cache - cache we work with
222 * @key - key
223 * @value - value
224 *
225 * Remove entry from cache @cache with key @key and value @value.
226 */
227void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
228{
229	struct hlist_bl_node *node;
230	struct hlist_bl_head *head;
231	struct mb_cache_entry *entry;
232
233	head = mb_cache_entry_head(cache, key);
234	hlist_bl_lock(head);
235	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
236		if (entry->e_key == key && entry->e_value == value) {
237			/* We keep hash list reference to keep entry alive */
238			hlist_bl_del_init(&entry->e_hash_list);
239			hlist_bl_unlock(head);
240			spin_lock(&cache->c_list_lock);
241			if (!list_empty(&entry->e_list)) {
242				list_del_init(&entry->e_list);
243				if (!WARN_ONCE(cache->c_entry_count == 0,
244		"mbcache: attempt to decrement c_entry_count past zero"))
245					cache->c_entry_count--;
246				atomic_dec(&entry->e_refcnt);
247			}
248			spin_unlock(&cache->c_list_lock);
249			mb_cache_entry_put(cache, entry);
250			return;
251		}
252	}
253	hlist_bl_unlock(head);
254}
255EXPORT_SYMBOL(mb_cache_entry_delete);
256
257/* mb_cache_entry_touch - cache entry got used
258 * @cache - cache the entry belongs to
259 * @entry - entry that got used
260 *
261 * Marks entry as used to give hit higher chances of surviving in cache.
262 */
263void mb_cache_entry_touch(struct mb_cache *cache,
264			  struct mb_cache_entry *entry)
265{
266	entry->e_referenced = 1;
267}
268EXPORT_SYMBOL(mb_cache_entry_touch);
269
270static unsigned long mb_cache_count(struct shrinker *shrink,
271				    struct shrink_control *sc)
272{
273	struct mb_cache *cache = container_of(shrink, struct mb_cache,
274					      c_shrink);
275
276	return cache->c_entry_count;
277}
278
279/* Shrink number of entries in cache */
280static unsigned long mb_cache_shrink(struct mb_cache *cache,
281				     unsigned long nr_to_scan)
282{
283	struct mb_cache_entry *entry;
284	struct hlist_bl_head *head;
285	unsigned long shrunk = 0;
286
287	spin_lock(&cache->c_list_lock);
288	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
289		entry = list_first_entry(&cache->c_list,
290					 struct mb_cache_entry, e_list);
291		if (entry->e_referenced) {
292			entry->e_referenced = 0;
293			list_move_tail(&entry->e_list, &cache->c_list);
294			continue;
295		}
296		list_del_init(&entry->e_list);
297		cache->c_entry_count--;
298		/*
299		 * We keep LRU list reference so that entry doesn't go away
300		 * from under us.
301		 */
302		spin_unlock(&cache->c_list_lock);
303		head = mb_cache_entry_head(cache, entry->e_key);
304		hlist_bl_lock(head);
305		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
306			hlist_bl_del_init(&entry->e_hash_list);
307			atomic_dec(&entry->e_refcnt);
308		}
309		hlist_bl_unlock(head);
310		if (mb_cache_entry_put(cache, entry))
311			shrunk++;
312		cond_resched();
313		spin_lock(&cache->c_list_lock);
314	}
315	spin_unlock(&cache->c_list_lock);
316
317	return shrunk;
318}
319
320static unsigned long mb_cache_scan(struct shrinker *shrink,
321				   struct shrink_control *sc)
322{
323	struct mb_cache *cache = container_of(shrink, struct mb_cache,
324					      c_shrink);
325	return mb_cache_shrink(cache, sc->nr_to_scan);
326}
327
328/* We shrink 1/X of the cache when we have too many entries in it */
329#define SHRINK_DIVISOR 16
330
331static void mb_cache_shrink_worker(struct work_struct *work)
332{
333	struct mb_cache *cache = container_of(work, struct mb_cache,
334					      c_shrink_work);
335	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
336}
337
338/*
339 * mb_cache_create - create cache
340 * @bucket_bits: log2 of the hash table size
341 *
342 * Create cache for keys with 2^bucket_bits hash entries.
343 */
344struct mb_cache *mb_cache_create(int bucket_bits)
345{
346	struct mb_cache *cache;
347	unsigned long bucket_count = 1UL << bucket_bits;
348	unsigned long i;
349
350	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
351	if (!cache)
352		goto err_out;
353	cache->c_bucket_bits = bucket_bits;
354	cache->c_max_entries = bucket_count << 4;
355	INIT_LIST_HEAD(&cache->c_list);
356	spin_lock_init(&cache->c_list_lock);
357	cache->c_hash = kmalloc_array(bucket_count,
358				      sizeof(struct hlist_bl_head),
359				      GFP_KERNEL);
360	if (!cache->c_hash) {
361		kfree(cache);
362		goto err_out;
363	}
364	for (i = 0; i < bucket_count; i++)
365		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
366
367	cache->c_shrink.count_objects = mb_cache_count;
368	cache->c_shrink.scan_objects = mb_cache_scan;
369	cache->c_shrink.seeks = DEFAULT_SEEKS;
370	if (register_shrinker(&cache->c_shrink)) {
371		kfree(cache->c_hash);
372		kfree(cache);
373		goto err_out;
374	}
375
376	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
377
378	return cache;
379
380err_out:
381	return NULL;
382}
383EXPORT_SYMBOL(mb_cache_create);
384
385/*
386 * mb_cache_destroy - destroy cache
387 * @cache: the cache to destroy
388 *
389 * Free all entries in cache and cache itself. Caller must make sure nobody
390 * (except shrinker) can reach @cache when calling this.
391 */
392void mb_cache_destroy(struct mb_cache *cache)
393{
394	struct mb_cache_entry *entry, *next;
395
396	unregister_shrinker(&cache->c_shrink);
397
398	/*
399	 * We don't bother with any locking. Cache must not be used at this
400	 * point.
401	 */
402	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
403		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
404			hlist_bl_del_init(&entry->e_hash_list);
405			atomic_dec(&entry->e_refcnt);
406		} else
407			WARN_ON(1);
408		list_del(&entry->e_list);
409		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
410		mb_cache_entry_put(cache, entry);
411	}
412	kfree(cache->c_hash);
413	kfree(cache);
414}
415EXPORT_SYMBOL(mb_cache_destroy);
416
417static int __init mbcache_init(void)
418{
419	mb_entry_cache = kmem_cache_create("mbcache",
420				sizeof(struct mb_cache_entry), 0,
421				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
422	if (!mb_entry_cache)
423		return -ENOMEM;
424	return 0;
425}
426
427static void __exit mbcache_exit(void)
428{
429	kmem_cache_destroy(mb_entry_cache);
430}
431
432module_init(mbcache_init)
433module_exit(mbcache_exit)
434
435MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
436MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
437MODULE_LICENSE("GPL");
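
For comparison, a minimal sketch against the v5.14.15 API above. The caller-visible change from v4.10.11 is that the sector_t block argument became an opaque u64 value (the entry field e_block became e_value) and mb_cache_entry_delete_block() became mb_cache_entry_delete(), so ext4 can cache either a block number or the number of an inode holding a large xattr value. The example_* names below are illustrative assumptions, not actual ext4 code.

#include <linux/mbcache.h>

static struct mb_cache *example_ea_inode_cache;

/* Remember that the xattr value with content hash @hash lives in inode @ino. */
static int example_remember_ea_inode(u32 hash, unsigned long ino)
{
	int err = mb_cache_entry_create(example_ea_inode_cache, GFP_NOFS,
					hash, (u64)ino, true /* reusable */);

	/* An existing entry for the same key/value pair is not an error. */
	return err == -EBUSY ? 0 : err;
}

/* Forget the mapping when the xattr value (and its inode) is released. */
static void example_forget_ea_inode(u32 hash, unsigned long ino)
{
	mb_cache_entry_delete(example_ea_inode_cache, hash, (u64)ino);
}

The lookup side is unchanged apart from reading e_value instead of e_block: mb_cache_entry_find_first()/mb_cache_entry_find_next() still walk the reusable entries for a key, and the caller still verifies the referenced data before reusing it.
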