// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

#define LOCAL_FREE_TARGET		(128)
#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET		(4)
#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET

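/* For the common (shared) LRU, a CPU refills its local free list from the
 * global LRU in batches of LOCAL_FREE_TARGET nodes.  For the per-cpu LRU,
 * at most PERCPU_FREE_TARGET nodes are reclaimed per allocation attempt.
 * The *_NR_SCANS values bound how many nodes a rotate or shrink pass may
 * examine while the list lock is held.
 */
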
/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)

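/* Round-robin walk over the possible CPUs, wrapping at the end.  For
 * example, with cpu_possible_mask = {0,1,2,3}, get_next_cpu(3) returns 0.
 */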
static int get_next_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_possible_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_possible_mask);
	return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
	return node->ref;
}

static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}

static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the removing node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}

/* Move nodes from local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	node->ref = 0;
	list_move(&node->list, &l->lists[tgt_type]);
}

/* Move nodes between or within active and inactive list (like
 * active to inactive, inactive to active or tail of active back to
 * the head of active).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	node->ref = 0;

	/* If the moving node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}

static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from tail
 * 2. If the node has the ref bit set, it will be rotated
 *    back to the head of active list with the ref bit cleared.
 *    Give this node one more chance to survive in the active list.
 * 3. If the ref bit is not set, move it to the head of the
 *    inactive list.
 * 4. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}

/* Rotate the inactive list. It starts from the next_inactive_rotation
 * 1. If the node has ref bit set, it will be moved to the head
 *    of active list with the ref bit cleared.
 * 2. If the node does not have ref bit set, it will leave it
 *    at its current location (i.e. do nothing) so that it can
 *    be considered during the next inactive_shrink.
 * 3. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	l->next_inactive_rotation = next;
}

/* Shrink the inactive list. It starts from the tail of the
 * inactive list and only move the nodes without the ref bit
 * set to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;

	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
		if (bpf_lru_node_is_ref(node)) {
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			if (++nshrinked == tgt_nshrink)
				break;
		}

		if (++i == lru->nr_scans)
			break;
	}

	return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(). It will just remove
 * one node from either inactive or active list without
 * honoring the ref-bit. It prefers inactive list to active
 * list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)

{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}

static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}

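/* Refill the local free list from the global LRU: flush this CPU's pending
 * nodes back, rotate the global lists, then move up to LOCAL_FREE_TARGET
 * free nodes over, shrinking the global lists for the remainder if the
 * free list falls short.
 */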
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}

static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	node->ref = 0;
	list_add(&node->list, local_pending_list(loc_l));
}

static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;

	node = list_first_entry_or_null(local_free_list(loc_l),
					struct bpf_lru_node,
					list);
	if (node)
		list_del(&node->list);

	return node;
}

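/* Pop a node from the tail of the local pending list.  The first pass skips
 * nodes with the ref bit set; if none can be reclaimed, a second (force)
 * pass ignores the ref bit.  A node is only returned once del_from_htab()
 * has successfully removed it from the map.
 */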
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;
	bool force = false;

ignore_ref:
	/* Get from the tail (i.e. older element) of the pending list. */
	list_for_each_entry_reverse(node, local_pending_list(loc_l),
				    list) {
		if ((!bpf_lru_node_is_ref(node) || force) &&
		    lru->del_from_htab(lru->del_arg, node)) {
			list_del(&node->list);
			return node;
		}
	}

	if (!force) {
		force = true;
		goto ignore_ref;
	}

	return NULL;
}

static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct list_head *free_list;
	struct bpf_lru_node *node = NULL;
	struct bpf_lru_list *l;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	l = per_cpu_ptr(lru->percpu_lru, cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_list_rotate(lru, l);

	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
	if (list_empty(free_list))
		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
				      BPF_LRU_LIST_T_FREE);

	if (!list_empty(free_list)) {
		node = list_first_entry(free_list, struct bpf_lru_node, list);
		*(u32 *)((void *)node + lru->hash_offset) = hash;
		node->ref = 0;
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}

	raw_spin_unlock_irqrestore(&l->lock, flags);

	return node;
}

static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free nodes found from the local free list and
	 * the global LRU list.
	 *
	 * Steal from the local free/pending list of the
	 * current CPU and remote CPU in RR. It starts
	 * with the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	loc_l->next_steal = steal;

	if (node) {
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}

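/* Get a free node and write @hash at lru->hash_offset bytes past the node.
 * For the common LRU the node is returned in the PENDING state on this
 * CPU's local list; for the per-cpu LRU it goes straight onto the inactive
 * list.  A caller that cannot use the node hands it back with
 * bpf_lru_push_free().
 */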
struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
	if (lru->percpu)
		return bpf_percpu_lru_pop_free(lru, hash);
	else
		return bpf_common_lru_pop_free(lru, hash);
}

static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		node->ref = 0;
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	struct bpf_lru_list *l;
	unsigned long flags;

	l = per_cpu_ptr(lru->percpu_lru, node->cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

	raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
	if (lru->percpu)
		bpf_percpu_lru_push_free(lru, node);
	else
		bpf_common_lru_push_free(lru, node);
}

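/* Carve the preallocated buffer into nr_elems elements of elem_size bytes
 * and put every embedded bpf_lru_node (at node_offset within each element)
 * on a free list: the global free list for the common LRU, or spread evenly
 * across the per-cpu free lists for the per-cpu LRU.
 */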
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	u32 i;

	for (i = 0; i < nr_elems; i++) {
		struct bpf_lru_node *node;

		node = (struct bpf_lru_node *)(buf + node_offset);
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		buf += elem_size;
	}
}

static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	u32 i, pcpu_entries;
	int cpu;
	struct bpf_lru_list *l;

	pcpu_entries = nr_elems / num_possible_cpus();

	i = 0;

	for_each_possible_cpu(cpu) {
		struct bpf_lru_node *node;

		l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
		node = (struct bpf_lru_node *)(buf + node_offset);
		node->cpu = cpu;
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
		      u32 elem_size, u32 nr_elems)
{
	if (lru->percpu)
		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
	else
		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
		INIT_LIST_HEAD(&loc_l->lists[i]);

	loc_l->next_steal = cpu;

	raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
		INIT_LIST_HEAD(&l->lists[i]);

	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
		l->counts[i] = 0;

	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

	raw_spin_lock_init(&l->lock);
}

int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *del_arg)
{
	int cpu;

	if (percpu) {
		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
		if (!lru->percpu_lru)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_list *l;

			l = per_cpu_ptr(lru->percpu_lru, cpu);
			bpf_lru_list_init(l);
		}
		lru->nr_scans = PERCPU_NR_SCANS;
	} else {
		struct bpf_common_lru *clru = &lru->common_lru;

		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
		if (!clru->local_list)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_locallist *loc_l;

			loc_l = per_cpu_ptr(clru->local_list, cpu);
			bpf_lru_locallist_init(loc_l, cpu);
		}

		bpf_lru_list_init(&clru->lru_list);
		lru->nr_scans = LOCAL_NR_SCANS;
	}

	lru->percpu = percpu;
	lru->del_from_htab = del_from_htab;
	lru->del_arg = del_arg;
	lru->hash_offset = hash_offset;

	return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
	if (lru->percpu)
		free_percpu(lru->percpu_lru);
	else
		free_percpu(lru->common_lru.local_list);
}
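
/* Illustrative sketch (not part of this file) of how a map built on top of
 * this LRU is expected to drive it.  The element layout and the names
 * my_elem, my_del_from_htab and my_map are assumptions for the example:
 *
 *	struct my_elem {
 *		struct bpf_lru_node lru_node;
 *		u32 hash;
 *		...
 *	};
 *
 *	static bool my_del_from_htab(void *arg, struct bpf_lru_node *node)
 *	{
 *		// Remove the element embedding @node from the map; return
 *		// false if someone else already removed it.
 *		return true;
 *	}
 *
 *	err = bpf_lru_init(&lru, percpu,
 *			   offsetof(struct my_elem, hash) -
 *			   offsetof(struct my_elem, lru_node),
 *			   my_del_from_htab, my_map);
 *	bpf_lru_populate(&lru, elems, offsetof(struct my_elem, lru_node),
 *			 sizeof(struct my_elem), max_entries);
 *
 *	node = bpf_lru_pop_free(&lru, hash);	// allocate, may evict
 *	if (node)
 *		elem = container_of(node, struct my_elem, lru_node);
 *	...
 *	bpf_lru_push_free(&lru, node);		// give an unused node back
 *
 *	bpf_lru_destroy(&lru);
 */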