Linux kernel source: kernel/bpf/bloom_filter.c (BPF bloom filter map implementation).
First copy below is from kernel v6.8.
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2021 Facebook */
  3
  4#include <linux/bitmap.h>
  5#include <linux/bpf.h>
  6#include <linux/btf.h>
  7#include <linux/err.h>
  8#include <linux/jhash.h>
  9#include <linux/random.h>
 10#include <linux/btf_ids.h>
 11
 12#define BLOOM_CREATE_FLAG_MASK \
 13	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
 14
/* In-kernel representation of a BPF_MAP_TYPE_BLOOM_FILTER map.
 * The bloom filter bit array is stored inline after the struct header.
 */
struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;	/* bit array size - 1; masks hashes into range */
	u32 hash_seed;		/* random seed unless BPF_F_ZERO_SEED was set */
	u32 nr_hash_funcs;	/* number of hash functions (from map_extra, default 5) */
	unsigned long bitset[];	/* flexible array member: the bloom filter bits */
};
 22
 23static u32 hash(struct bpf_bloom_filter *bloom, void *value,
 24		u32 value_size, u32 index)
 25{
 26	u32 h;
 27
 28	if (likely(value_size % 4 == 0))
 29		h = jhash2(value, value_size / 4, bloom->hash_seed + index);
 
 30	else
 31		h = jhash(value, value_size, bloom->hash_seed + index);
 32
 33	return h & bloom->bitset_mask;
 34}
 35
 36static long bloom_map_peek_elem(struct bpf_map *map, void *value)
 37{
 38	struct bpf_bloom_filter *bloom =
 39		container_of(map, struct bpf_bloom_filter, map);
 40	u32 i, h;
 41
 42	for (i = 0; i < bloom->nr_hash_funcs; i++) {
 43		h = hash(bloom, value, map->value_size, i);
 44		if (!test_bit(h, bloom->bitset))
 45			return -ENOENT;
 46	}
 47
 48	return 0;
 49}
 50
 51static long bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
 52{
 53	struct bpf_bloom_filter *bloom =
 54		container_of(map, struct bpf_bloom_filter, map);
 55	u32 i, h;
 56
 57	if (flags != BPF_ANY)
 58		return -EINVAL;
 59
 60	for (i = 0; i < bloom->nr_hash_funcs; i++) {
 61		h = hash(bloom, value, map->value_size, i);
 62		set_bit(h, bloom->bitset);
 63	}
 64
 65	return 0;
 66}
 67
/* Elements cannot be removed from a bloom filter, so pop is unsupported */
static long bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
 72
/* Elements cannot be removed from a bloom filter, so delete is unsupported */
static long bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
 77
/* Bloom filter maps are keyless and not iterable */
static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EOPNOTSUPP;
}
 82
/* Allocate and initialize a bloom filter map from userspace attributes.
 *
 * attr->max_entries is the expected number of entries, and the lower 4
 * bits of attr->map_extra select the number of hash functions (default 5).
 * Returns the embedded bpf_map on success, ERR_PTR(-EINVAL) on bad
 * attributes, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	/* Bloom filter maps are keyless: key_size must be 0 */
	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes the
	 * false positive probability is n * k / ln(2) where n is the number of
	 * expected entries in the bloom filter and k is the number of hash
	 * functions. We use 7 / 5 to approximate 1 / ln(2).
	 *
	 * We round this up to the nearest power of two to enable more efficient
	 * hashing using bitmasks. The bitmask will be the bit array size - 1.
	 *
	 * If this overflows a u32, the bit array size will have 2^32 (4
	 * GB) bits.
	 */
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing the
		 * u32, we use U32_MAX, which will round up to the equivalent
		 * number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		/* At least one long's worth of bits, else round up to a
		 * power of two so (hash & bitset_mask) is a uniform index
		 */
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}

	/* Round up so the flexible unsigned long bitset[] is fully backed */
	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Seed stays 0 when BPF_F_ZERO_SEED is requested (deterministic maps,
	 * e.g. for testing); otherwise use a random seed
	 */
	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_u32();

	return &bloom->map;
}
149
150static void bloom_map_free(struct bpf_map *map)
151{
152	struct bpf_bloom_filter *bloom =
153		container_of(map, struct bpf_bloom_filter, map);
154
155	bpf_map_area_free(bloom);
156}
157
/* Direct lookup is invalid for keyless bloom filter maps.
 * The eBPF program should use map_peek_elem instead.
 */
static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}
163
/* Direct update is invalid for keyless bloom filter maps.
 * The eBPF program should use map_push_elem instead.
 */
static long bloom_map_update_elem(struct bpf_map *map, void *key,
				  void *value, u64 flags)
{
	return -EINVAL;
}
170
171static int bloom_map_check_btf(const struct bpf_map *map,
172			       const struct btf *btf,
173			       const struct btf_type *key_type,
174			       const struct btf_type *value_type)
175{
176	/* Bloom filter maps are keyless */
177	return btf_type_is_void(key_type) ? 0 : -EINVAL;
178}
179
180static u64 bloom_map_mem_usage(const struct bpf_map *map)
181{
182	struct bpf_bloom_filter *bloom;
183	u64 bitset_bytes;
184
185	bloom = container_of(map, struct bpf_bloom_filter, map);
186	bitset_bytes = BITS_TO_BYTES((u64)bloom->bitset_mask + 1);
187	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
188	return sizeof(*bloom) + bitset_bytes;
189}
190
/* BTF type id for struct bpf_bloom_filter, exposed via map_btf_id */
BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
/* Operations table for BPF_MAP_TYPE_BLOOM_FILTER: push/peek are the
 * supported data path; lookup/update/delete/pop return errors.
 */
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_get_next_key = bloom_map_get_next_key,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_mem_usage = bloom_map_mem_usage,
	.map_btf_id = &bpf_bloom_map_btf_ids[0],
};
Second copy below: the same file (kernel/bpf/bloom_filter.c) as of kernel v6.2.
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2021 Facebook */
  3
  4#include <linux/bitmap.h>
  5#include <linux/bpf.h>
  6#include <linux/btf.h>
  7#include <linux/err.h>
  8#include <linux/jhash.h>
  9#include <linux/random.h>
 10#include <linux/btf_ids.h>
 11
 12#define BLOOM_CREATE_FLAG_MASK \
 13	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
 14
/* In-kernel representation of a BPF_MAP_TYPE_BLOOM_FILTER map.
 * The bloom filter bit array is stored inline after the struct header.
 */
struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;	/* bit array size - 1; masks hashes into range */
	u32 hash_seed;		/* random seed unless BPF_F_ZERO_SEED was set */
	/* If the size of the values in the bloom filter is u32 aligned,
	 * then it is more performant to use jhash2 as the underlying hash
	 * function, else we use jhash. This tracks the number of u32s
	 * in an u32-aligned value size. If the value size is not u32 aligned,
	 * this will be 0.
	 */
	u32 aligned_u32_count;
	u32 nr_hash_funcs;	/* number of hash functions (from map_extra, default 5) */
	unsigned long bitset[];	/* flexible array member: the bloom filter bits */
};
 29
 30static u32 hash(struct bpf_bloom_filter *bloom, void *value,
 31		u32 value_size, u32 index)
 32{
 33	u32 h;
 34
 35	if (bloom->aligned_u32_count)
 36		h = jhash2(value, bloom->aligned_u32_count,
 37			   bloom->hash_seed + index);
 38	else
 39		h = jhash(value, value_size, bloom->hash_seed + index);
 40
 41	return h & bloom->bitset_mask;
 42}
 43
 44static int bloom_map_peek_elem(struct bpf_map *map, void *value)
 45{
 46	struct bpf_bloom_filter *bloom =
 47		container_of(map, struct bpf_bloom_filter, map);
 48	u32 i, h;
 49
 50	for (i = 0; i < bloom->nr_hash_funcs; i++) {
 51		h = hash(bloom, value, map->value_size, i);
 52		if (!test_bit(h, bloom->bitset))
 53			return -ENOENT;
 54	}
 55
 56	return 0;
 57}
 58
 59static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
 60{
 61	struct bpf_bloom_filter *bloom =
 62		container_of(map, struct bpf_bloom_filter, map);
 63	u32 i, h;
 64
 65	if (flags != BPF_ANY)
 66		return -EINVAL;
 67
 68	for (i = 0; i < bloom->nr_hash_funcs; i++) {
 69		h = hash(bloom, value, map->value_size, i);
 70		set_bit(h, bloom->bitset);
 71	}
 72
 73	return 0;
 74}
 75
/* Elements cannot be removed from a bloom filter, so pop is unsupported */
static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
 80
/* Elements cannot be removed from a bloom filter, so delete is unsupported */
static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
 85
/* Bloom filter maps are keyless and not iterable */
static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EOPNOTSUPP;
}
 90
/* Allocate and initialize a bloom filter map from userspace attributes.
 *
 * attr->max_entries is the expected number of entries, and the lower 4
 * bits of attr->map_extra select the number of hash functions (default 5).
 * Returns the embedded bpf_map on success, ERR_PTR(-EPERM) without
 * CAP_BPF, ERR_PTR(-EINVAL) on bad attributes, or ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* Bloom filter maps are keyless: key_size must be 0 */
	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes the
	 * false positive probability is n * k / ln(2) where n is the number of
	 * expected entries in the bloom filter and k is the number of hash
	 * functions. We use 7 / 5 to approximate 1 / ln(2).
	 *
	 * We round this up to the nearest power of two to enable more efficient
	 * hashing using bitmasks. The bitmask will be the bit array size - 1.
	 *
	 * If this overflows a u32, the bit array size will have 2^32 (4
	 * GB) bits.
	 */
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing the
		 * u32, we use U32_MAX, which will round up to the equivalent
		 * number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		/* At least one long's worth of bits, else round up to a
		 * power of two so (hash & bitset_mask) is a uniform index
		 */
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}

	/* Round up so the flexible unsigned long bitset[] is fully backed */
	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Check whether the value size is u32-aligned */
	if ((attr->value_size & (sizeof(u32) - 1)) == 0)
		bloom->aligned_u32_count =
			attr->value_size / sizeof(u32);

	/* Seed stays 0 when BPF_F_ZERO_SEED is requested (deterministic maps,
	 * e.g. for testing); otherwise use a random seed
	 */
	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_u32();

	return &bloom->map;
}
165
166static void bloom_map_free(struct bpf_map *map)
167{
168	struct bpf_bloom_filter *bloom =
169		container_of(map, struct bpf_bloom_filter, map);
170
171	bpf_map_area_free(bloom);
172}
173
/* Direct lookup is invalid for keyless bloom filter maps.
 * The eBPF program should use map_peek_elem instead.
 */
static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}
179
/* Direct update is invalid for keyless bloom filter maps.
 * The eBPF program should use map_push_elem instead.
 */
static int bloom_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	return -EINVAL;
}
186
187static int bloom_map_check_btf(const struct bpf_map *map,
188			       const struct btf *btf,
189			       const struct btf_type *key_type,
190			       const struct btf_type *value_type)
191{
192	/* Bloom filter maps are keyless */
193	return btf_type_is_void(key_type) ? 0 : -EINVAL;
194}
195
 
 
 
 
 
 
 
 
 
 
 
/* BTF type id for struct bpf_bloom_filter, exposed via map_btf_id */
BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
/* Operations table for BPF_MAP_TYPE_BLOOM_FILTER: push/peek are the
 * supported data path; lookup/update/delete/pop return errors.
 */
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_get_next_key = bloom_map_get_next_key,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_btf_id = &bpf_bloom_map_btf_ids[0],
};