/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016 Facebook
 */
#ifndef __BPF_LRU_LIST_H_
#define __BPF_LRU_LIST_H_

#include <linux/cache.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>

#define NR_BPF_LRU_LIST_T	(3)
#define NR_BPF_LRU_LIST_COUNT	(2)
#define NR_BPF_LRU_LOCAL_LIST_T	(2)
#define BPF_LOCAL_LIST_T_OFFSET	NR_BPF_LRU_LIST_T

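/* The first three types index the global lists[] of a struct bpf_lru_list;
 * the two local types index the lists[] of a per-cpu struct bpf_lru_locallist
 * after subtracting BPF_LOCAL_LIST_T_OFFSET (see kernel/bpf/bpf_lru_list.c).
 */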
enum bpf_lru_list_type {
	BPF_LRU_LIST_T_ACTIVE,
	BPF_LRU_LIST_T_INACTIVE,
	BPF_LRU_LIST_T_FREE,
	BPF_LRU_LOCAL_LIST_T_FREE,
	BPF_LRU_LOCAL_LIST_T_PENDING,
};

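/* Embedded in each map element.  @cpu records the CPU whose local list owns
 * the node, @type holds a bpf_lru_list_type value and @ref is the reference
 * bit set by bpf_lru_node_set_ref() on lookup and cleared during rotation.
 */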
struct bpf_lru_node {
	struct list_head list;
	u16 cpu;
	u8 type;
	u8 ref;
};

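/* Global LRU.  Rotation moves unreferenced nodes from the active to the
 * inactive list; the inactive list itself is rotated incrementally, resuming
 * at next_inactive_rotation.  counts[] tracks only the active and inactive
 * list sizes (hence NR_BPF_LRU_LIST_COUNT == 2).
 */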
struct bpf_lru_list {
	struct list_head lists[NR_BPF_LRU_LIST_T];
	unsigned int counts[NR_BPF_LRU_LIST_COUNT];
	/* The next inactive list rotation starts from here */
	struct list_head *next_inactive_rotation;

	raw_spinlock_t lock ____cacheline_aligned_in_smp;
};

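/* Per-cpu list pair used by the common LRU: a small FREE cache refilled from
 * the global free list, and a PENDING list of nodes handed out to the caller,
 * which are flushed back to the global list when the free cache is refilled.
 * next_steal remembers which other CPU to try stealing free nodes from next.
 */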
struct bpf_lru_locallist {
	struct list_head lists[NR_BPF_LRU_LOCAL_LIST_T];
	u16 next_steal;
	raw_spinlock_t lock;
};

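/* The common (default) LRU: one global list shared by all CPUs plus per-cpu
 * local lists that batch allocations and frees to keep contention on the
 * global lock low.
 */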
struct bpf_common_lru {
	struct bpf_lru_list lru_list;
	struct bpf_lru_locallist __percpu *local_list;
};

typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);

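/* Top-level LRU object.  When @percpu is true (BPF_F_NO_COMMON_LRU), each CPU
 * gets an independent bpf_lru_list; otherwise the common LRU is used.
 * @del_from_htab is called with @del_arg to evict a victim element when free
 * nodes must be reclaimed, @hash_offset is the offset of the element's hash
 * relative to its embedded bpf_lru_node, and @nr_scans bounds how many nodes
 * a single shrink pass examines.
 */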
struct bpf_lru {
	union {
		struct bpf_common_lru common_lru;
		struct bpf_lru_list __percpu *percpu_lru;
	};
	del_from_htab_func del_from_htab;
	void *del_arg;
	unsigned int hash_offset;
	unsigned int nr_scans;
	bool percpu;
};

static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
{
	/* ref is an approximation on access frequency.  It does not
	 * have to be very accurate.  Hence, READ_ONCE()/WRITE_ONCE()
	 * are used instead of a lock to tolerate the racy updates.
	 */
	if (!READ_ONCE(node->ref))
		WRITE_ONCE(node->ref, 1);
}
70
71int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
72 del_from_htab_func del_from_htab, void *delete_arg);
73void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
74 u32 elem_size, u32 nr_elems);
75void bpf_lru_destroy(struct bpf_lru *lru);
76struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
77void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
78
79#endif
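
/* Usage sketch (illustrative only; the in-tree consumer is the LRU hash map
 * in kernel/bpf/hashtab.c).  A caller embeds bpf_lru_node in its own element
 * type and wires the callbacks roughly like the hypothetical my_table
 * example below:
 *
 *	struct my_elem {
 *		struct bpf_lru_node lru_node;
 *		u32 hash;
 *	};
 *
 *	static bool my_del(void *arg, struct bpf_lru_node *node)
 *	{
 *		// Unlink the element embedding @node from my_table's index;
 *		// returning true lets the LRU reclaim the node.
 *	}
 *
 *	bpf_lru_init(&lru, false,
 *		     offsetof(struct my_elem, hash) -
 *		     offsetof(struct my_elem, lru_node),
 *		     my_del, my_table);
 *	bpf_lru_populate(&lru, elems, offsetof(struct my_elem, lru_node),
 *			 sizeof(struct my_elem), nr_elems);
 *
 *	node = bpf_lru_pop_free(&lru, hash);	// may evict via my_del()
 *	... insert the element embedding node into my_table ...
 *	bpf_lru_push_free(&lru, node);		// on delete or insert failure
 */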