v6.13.7
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		kasan_mempool_unpoison_object(entry, cache->elem_size);
		return entry;
	}

	return NULL;
}

/* returns false if the cache was initialized properly */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (cache->entries) {
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
		cache->elem_size = size;
		return false;
	}
	return true;
}

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *entry;

	if (!cache->entries)
		return;

	while ((entry = io_alloc_cache_get(cache)) != NULL)
		free(entry);

	kvfree(cache->entries);
	cache->entries = NULL;
}
#endif
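
Usage note (not part of the kernel header above): struct io_alloc_cache itself is defined separately, in include/linux/io_uring_types.h, and io_alloc_cache_init() returns false on success. A minimal, hypothetical caller might look like the sketch below; every my_* name is invented for illustration and is not from the kernel tree.

#include <linux/slab.h>
#include "alloc_cache.h"

/* Hypothetical fixed-size object recycled through the array-backed cache. */
struct my_req_ctx {
	int example_field;
};

static int my_ctx_cache_setup(struct io_alloc_cache *cache)
{
	/* Note the inverted return convention: false means success. */
	if (io_alloc_cache_init(cache, IO_ALLOC_CACHE_MAX,
				sizeof(struct my_req_ctx)))
		return -ENOMEM;
	return 0;
}

static struct my_req_ctx *my_ctx_get(struct io_alloc_cache *cache)
{
	struct my_req_ctx *ctx = io_alloc_cache_get(cache);

	/* Fall back to a fresh allocation when the cache is empty. */
	if (!ctx)
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	return ctx;
}

static void my_ctx_put(struct io_alloc_cache *cache, struct my_req_ctx *ctx)
{
	/* Put fails when the cache is full (or KASAN rejects the object). */
	if (!io_alloc_cache_put(cache, ctx))
		kfree(ctx);
}

static void my_ctx_cache_teardown(struct io_alloc_cache *cache)
{
	/* kfree() matches the void (*)(const void *) callback type. */
	io_alloc_cache_free(cache, kfree);
}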
v6.8
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};

static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		kasan_mempool_poison_object(entry);
		return true;
	}
	return false;
}

static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
{
	return !cache->list.next;
}

static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}

static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
					void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
#endif
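
Usage note (again not part of the header above): in this older variant, idle objects are chained through an embedded struct io_cache_entry rather than stored in a pointer array, so callers recover the containing object with container_of(). A hypothetical caller, with all my_* names invented for illustration:

#include <linux/slab.h>
#include "alloc_cache.h"

/* Hypothetical cached object: the io_cache_entry is embedded so the cache
 * can chain idle objects through their own memory.
 */
struct my_obj {
	struct io_cache_entry	cache;	/* used only while parked in the cache */
	int			example_field;
};

static void my_obj_cache_setup(struct io_alloc_cache *cache)
{
	/* Unlike the newer array-backed version, this init cannot fail. */
	io_alloc_cache_init(cache, IO_ALLOC_CACHE_MAX, sizeof(struct my_obj));
}

static void my_obj_cache_free_one(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct my_obj, cache));
}

static struct my_obj *my_obj_get(struct io_alloc_cache *cache)
{
	struct io_cache_entry *entry = io_alloc_cache_get(cache);

	if (entry)
		return container_of(entry, struct my_obj, cache);
	/* Fall back to a fresh allocation when the cache is empty. */
	return kmalloc(sizeof(struct my_obj), GFP_KERNEL);
}

static void my_obj_put(struct io_alloc_cache *cache, struct my_obj *obj)
{
	/* Put fails once nr_cached reaches max_cached. */
	if (!io_alloc_cache_put(cache, &obj->cache))
		kfree(obj);
}

static void my_obj_cache_teardown(struct io_alloc_cache *cache)
{
	io_alloc_cache_free(cache, my_obj_cache_free_one);
}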