1#ifndef IOU_ALLOC_CACHE_H
2#define IOU_ALLOC_CACHE_H
3
4/*
5 * Don't allow the cache to grow beyond this size.
6 */
7#define IO_ALLOC_CACHE_MAX 512
8
/*
 * A cache entry is just an embedded list node; the cached object wraps
 * (or begins with) this struct and is recovered via container_of() in
 * io_alloc_cache_get()/io_alloc_cache_free().
 */
struct io_cache_entry {
	struct hlist_node node;
};
12
13static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
14 struct io_cache_entry *entry)
15{
16 if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
17 cache->nr_cached++;
18 hlist_add_head(&entry->node, &cache->list);
19 return true;
20 }
21 return false;
22}
23
24static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
25{
26 if (!hlist_empty(&cache->list)) {
27 struct hlist_node *node = cache->list.first;
28
29 hlist_del(node);
30 return container_of(node, struct io_cache_entry, node);
31 }
32
33 return NULL;
34}
35
36static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
37{
38 INIT_HLIST_HEAD(&cache->list);
39 cache->nr_cached = 0;
40}
41
42static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
43 void (*free)(struct io_cache_entry *))
44{
45 while (!hlist_empty(&cache->list)) {
46 struct hlist_node *node = cache->list.first;
47
48 hlist_del(node);
49 free(container_of(node, struct io_cache_entry, node));
50 }
51 cache->nr_cached = 0;
52}
53#endif
1#ifndef IOU_ALLOC_CACHE_H
2#define IOU_ALLOC_CACHE_H
3
4/*
5 * Don't allow the cache to grow beyond this size.
6 */
7#define IO_ALLOC_CACHE_MAX 512
8
/*
 * A cache entry is just an embedded singly-linked stack node; the cached
 * object wraps (or begins with) this struct and is recovered via
 * container_of() in io_alloc_cache_get().
 */
struct io_cache_entry {
	struct io_wq_work_node node;
};
12
13static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
14 struct io_cache_entry *entry)
15{
16 if (cache->nr_cached < cache->max_cached) {
17 cache->nr_cached++;
18 wq_stack_add_head(&entry->node, &cache->list);
19 kasan_mempool_poison_object(entry);
20 return true;
21 }
22 return false;
23}
24
25static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
26{
27 return !cache->list.next;
28}
29
/*
 * Pop the most recently cached entry, or return NULL when the cache is
 * empty.  Counterpart of io_alloc_cache_put(): nr_cached is decremented
 * here so the put/get accounting stays balanced.
 */
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		/*
		 * Unpoison BEFORE reading cache->list.next->next below:
		 * the embedded ->node lives inside the object that
		 * io_alloc_cache_put() poisoned.
		 */
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}
44
45static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
46 unsigned max_nr, size_t size)
47{
48 cache->list.next = NULL;
49 cache->nr_cached = 0;
50 cache->max_cached = max_nr;
51 cache->elem_size = size;
52}
53
54static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
55 void (*free)(struct io_cache_entry *))
56{
57 while (1) {
58 struct io_cache_entry *entry = io_alloc_cache_get(cache);
59
60 if (!entry)
61 break;
62 free(entry);
63 }
64 cache->nr_cached = 0;
65}
66#endif