kernel/bpf/bpf_lru_list.c — captured from a web code browser (navigation
chrome removed).  The v4.10.11 copy of the file follows.
  1/* Copyright (c) 2016 Facebook
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 */
  7#include <linux/cpumask.h>
  8#include <linux/spinlock.h>
  9#include <linux/percpu.h>
 10
 11#include "bpf_lru_list.h"
 12
 13#define LOCAL_FREE_TARGET		(128)
 14#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET
 15
 16#define PERCPU_FREE_TARGET		(16)
 17#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET
 18
 19/* Helpers to get the local list index */
 20#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
 21#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
 22#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
 23#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)
 24
 25static int get_next_cpu(int cpu)
 26{
 27	cpu = cpumask_next(cpu, cpu_possible_mask);
 28	if (cpu >= nr_cpu_ids)
 29		cpu = cpumask_first(cpu_possible_mask);
 30	return cpu;
 31}
 32
/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	/* Per-cpu list of free nodes ready to be handed out locally */
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}
 38
static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	/* Per-cpu list of nodes handed out but not yet flushed to the LRU */
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}
 43
 44/* bpf_lru_node helpers */
 45static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
 46{
 47	return node->ref;
 
 
 
 
 
 48}
 49
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	/* Only list types below NR_BPF_LRU_LIST_COUNT have a counter */
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}
 56
static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	/* Only list types below NR_BPF_LRU_LIST_COUNT have a counter */
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}
 63
/* Detach @node from its active/inactive list and put it on @free_list,
 * retagging it as @tgt_free_type.  Local-list nodes must never reach
 * this function.
 */
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the removing node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}
 83
/* Move nodes from local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	/* Source must be a local-list type, target must not be */
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	node->ref = 0;	/* a node entering the LRU starts unreferenced */
	list_move(&node->list, &l->lists[tgt_type]);
}
 98
/* Move nodes between or within active and inactive list (like
 * active to inactive, inactive to active or tail of active back to
 * the head of active).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	node->ref = 0;	/* a move always consumes the ref bit */

	/* If the moving node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}
126
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	/* "Low" means the inactive list is outnumbered by the active
	 * list; the caller then rotates the active list to refill it.
	 */
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}
132
/* Rotate the active list:
 * 1. Start from tail
 * 2. If the node has the ref bit set, it will be rotated
 *    back to the head of active list with the ref bit cleared.
 *    Give this node one more chance to survive in the active list.
 * 3. If the ref bit is not set, move it to the head of the
 *    inactive list.
 * 4. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		/* Referenced nodes are re-queued at the head, so reaching
		 * the old head node means the whole list has been seen.
		 */
		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}
160
/* Rotate the inactive list.  It starts from the next_inactive_rotation
 * 1. If the node has ref bit set, it will be moved to the head
 *    of active list with the ref bit cleared.
 * 2. If the node does not have ref bit set, it will leave it
 *    at its current location (i.e. do nothing) so that it can
 *    be considered during the next inactive_shrink.
 * 3. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	/* last: the final entry to visit this round — one past the
	 * saved rotation point, skipping the list head sentinel.
	 */
	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			/* Skip the list head sentinel */
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;	/* saved before a possible move */
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	/* Resume from here on the next rotation */
	l->next_inactive_rotation = next;
}
203
204/* Shrink the inactive list.  It starts from the tail of the
205 * inactive list and only move the nodes without the ref bit
206 * set to the designated free list.
207 */
208static unsigned int
209__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
210			       struct bpf_lru_list *l,
211			       unsigned int tgt_nshrink,
212			       struct list_head *free_list,
213			       enum bpf_lru_list_type tgt_free_type)
214{
215	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
216	struct bpf_lru_node *node, *tmp_node, *first_node;
217	unsigned int nshrinked = 0;
218	unsigned int i = 0;
219
220	first_node = list_first_entry(inactive, struct bpf_lru_node, list);
221	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
222		if (bpf_lru_node_is_ref(node)) {
223			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
224		} else if (lru->del_from_htab(lru->del_arg, node)) {
225			__bpf_lru_node_move_to_free(l, node, free_list,
226						    tgt_free_type);
227			if (++nshrinked == tgt_nshrink)
228				break;
229		}
230
231		if (++i == lru->nr_scans)
232			break;
233	}
234
235	return nshrinked;
236}
237
/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	/* Only refill the inactive list from the active list when the
	 * inactive list has become the smaller of the two.
	 */
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}
248
/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive().  It will just remove
 * one node from either inactive or active list without
 * honoring the ref-bit.  It prefers inactive list to active
 * list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)

{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		/* Free exactly one node as soon as the htab agrees to
		 * release it.
		 */
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}
292
/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		/* Nodes referenced while pending enter the active list;
		 * untouched ones enter the inactive list.
		 */
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}
308
/* Return @node to the free list of the global LRU list @l. */
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}
321
/* Refill this cpu's local free list from the global LRU list.
 * Called with loc_l->lock held and interrupts already disabled by the
 * caller (see bpf_common_lru_pop_free()), hence the plain
 * raw_spin_lock() on the global list lock.
 */
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	/* Give this cpu's pending nodes back to the global list first */
	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	/* Grab up to LOCAL_FREE_TARGET nodes from the global free list */
	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	/* Not enough free nodes: shrink the LRU lists for the rest */
	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}
350
/* Hand @node out on @cpu: record its hash and owner cpu, then queue it
 * on the local pending list as BPF_LRU_LOCAL_LIST_T_PENDING.
 */
static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	/* The hash lives at a caller-configured offset inside the node */
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	node->ref = 0;
	list_add(&node->list, local_pending_list(loc_l));
}
363
364struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l)
 
365{
366	struct bpf_lru_node *node;
367
368	node = list_first_entry_or_null(local_free_list(loc_l),
369					struct bpf_lru_node,
370					list);
371	if (node)
372		list_del(&node->list);
373
374	return node;
375}
376
377struct bpf_lru_node *__local_list_pop_pending(struct bpf_lru *lru,
378					      struct bpf_lru_locallist *loc_l)
379{
380	struct bpf_lru_node *node;
381	bool force = false;
382
383ignore_ref:
384	/* Get from the tail (i.e. older element) of the pending list. */
385	list_for_each_entry_reverse(node, local_pending_list(loc_l),
386				    list) {
387		if ((!bpf_lru_node_is_ref(node) || force) &&
388		    lru->del_from_htab(lru->del_arg, node)) {
389			list_del(&node->list);
390			return node;
391		}
392	}
393
394	if (!force) {
395		force = true;
396		goto ignore_ref;
397	}
398
399	return NULL;
400}
401
/* Pop a free node from this cpu's private LRU list (percpu mode),
 * stamping @hash into it and placing it on the inactive list.
 * Returns NULL if no node could be freed up.
 */
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct list_head *free_list;
	struct bpf_lru_node *node = NULL;
	struct bpf_lru_list *l;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	l = per_cpu_ptr(lru->percpu_lru, cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_list_rotate(lru, l);

	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
	if (list_empty(free_list))
		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
				      BPF_LRU_LIST_T_FREE);

	if (!list_empty(free_list)) {
		node = list_first_entry(free_list, struct bpf_lru_node, list);
		*(u32 *)((void *)node + lru->hash_offset) = hash;
		node->ref = 0;
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}

	raw_spin_unlock_irqrestore(&l->lock, flags);

	return node;
}
433
/* Pop a free node in common (shared-LRU) mode: try this cpu's local
 * free list, refill it from the global LRU if empty, and finally
 * steal from other cpus' local lists in round-robin order.
 */
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	/* Fast path: a node on this cpu's local free list; otherwise
	 * refill the local list from the global LRU and retry once.
	 */
	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free nodes found from the local free list and
	 * the global LRU list.
	 *
	 * Steal from the local free/pending list of the
	 * current CPU and remote CPU in RR.  It starts
	 * with the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	/* Remember where to resume stealing next time */
	loc_l->next_steal = steal;

	if (node) {
		/* Re-take this cpu's lock to queue the stolen node as
		 * pending here.
		 */
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}
496
497struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
498{
499	if (lru->percpu)
500		return bpf_percpu_lru_pop_free(lru, hash);
501	else
502		return bpf_common_lru_pop_free(lru, hash);
503}
504
/* Free @node in common mode.  A pending node goes back to its owner
 * cpu's local free list; anything else returns to the global LRU.
 */
static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{

	unsigned long flags;

	/* Double free: the node is already on a free list */
	if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		/* node->cpu is the cpu that handed the node out, which
		 * may differ from the current cpu.
		 */
		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

		/* Re-check under the lock: the owner cpu may have
		 * flushed the node to the global LRU list meanwhile.
		 */
		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		node->ref = 0;
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}
537
/* percpu mode: return @node straight to its owning cpu's free list. */
static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	struct bpf_lru_list *l;
	unsigned long flags;

	l = per_cpu_ptr(lru->percpu_lru, node->cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

	raw_spin_unlock_irqrestore(&l->lock, flags);
}
552
553void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
554{
555	if (lru->percpu)
556		bpf_percpu_lru_push_free(lru, node);
557	else
558		bpf_common_lru_push_free(lru, node);
559}
560
561void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
562			     u32 elem_size, u32 nr_elems)
 
563{
564	struct bpf_lru_list *l = &lru->common_lru.lru_list;
565	u32 i;
566
567	for (i = 0; i < nr_elems; i++) {
568		struct bpf_lru_node *node;
569
570		node = (struct bpf_lru_node *)(buf + node_offset);
571		node->type = BPF_LRU_LIST_T_FREE;
572		node->ref = 0;
573		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
574		buf += elem_size;
575	}
576}
577
/* Seed the percpu LRU: split @nr_elems nodes evenly across the
 * possible cpus (pcpu_entries each) onto their private free lists.
 */
void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
			     u32 elem_size, u32 nr_elems)

{
	u32 i, pcpu_entries;
	int cpu;
	struct bpf_lru_list *l;

	pcpu_entries = nr_elems / num_possible_cpus();

	i = 0;

	for_each_possible_cpu(cpu) {
		struct bpf_lru_node *node;

		l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
		node = (struct bpf_lru_node *)(buf + node_offset);
		node->cpu = cpu;
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		/* Stay on this cpu until its share of pcpu_entries nodes
		 * is filled; any remainder goes to the last cpus visited.
		 */
		if (i % pcpu_entries)
			goto again;
	}
}
607
608void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
609		      u32 elem_size, u32 nr_elems)
610{
611	if (lru->percpu)
612		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
613					nr_elems);
614	else
615		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
616					nr_elems);
617}
618
619static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
620{
621	int i;
622
623	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
624		INIT_LIST_HEAD(&loc_l->lists[i]);
625
626	loc_l->next_steal = cpu;
627
628	raw_spin_lock_init(&loc_l->lock);
629}
630
631static void bpf_lru_list_init(struct bpf_lru_list *l)
632{
633	int i;
634
635	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
636		INIT_LIST_HEAD(&l->lists[i]);
637
638	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
639		l->counts[i] = 0;
640
641	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];
642
643	raw_spin_lock_init(&l->lock);
644}
645
/* Initialize @lru.  In percpu mode every cpu gets its own full LRU
 * list; otherwise one global LRU list is shared and each cpu keeps a
 * small local free/pending cache.  @hash_offset locates the hash
 * field inside each node; @del_from_htab/@del_arg let the LRU ask the
 * owning htab to release a node.  Returns 0 or -ENOMEM.
 */
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *del_arg)
{
	int cpu;

	if (percpu) {
		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
		if (!lru->percpu_lru)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_list *l;

			l = per_cpu_ptr(lru->percpu_lru, cpu);
			bpf_lru_list_init(l);
		}
		lru->nr_scans = PERCPU_NR_SCANS;
	} else {
		struct bpf_common_lru *clru = &lru->common_lru;

		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
		if (!clru->local_list)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_locallist *loc_l;

			loc_l = per_cpu_ptr(clru->local_list, cpu);
			bpf_lru_locallist_init(loc_l, cpu);
		}

		bpf_lru_list_init(&clru->lru_list);
		lru->nr_scans = LOCAL_NR_SCANS;
	}

	lru->percpu = percpu;
	lru->del_from_htab = del_from_htab;
	lru->del_arg = del_arg;
	lru->hash_offset = hash_offset;

	return 0;
}
688
689void bpf_lru_destroy(struct bpf_lru *lru)
690{
691	if (lru->percpu)
692		free_percpu(lru->percpu_lru);
693	else
694		free_percpu(lru->common_lru.local_list);
695}
The v6.9.4 copy of the same file follows (truncated at the end of the capture).
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
 
 
 
 
  3 */
  4#include <linux/cpumask.h>
  5#include <linux/spinlock.h>
  6#include <linux/percpu.h>
  7
  8#include "bpf_lru_list.h"
  9
 10#define LOCAL_FREE_TARGET		(128)
 11#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET
 12
 13#define PERCPU_FREE_TARGET		(4)
 14#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET
 15
 16/* Helpers to get the local list index */
 17#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
 18#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
 19#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
 20#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)
 21
 22static int get_next_cpu(int cpu)
 23{
 24	cpu = cpumask_next(cpu, cpu_possible_mask);
 25	if (cpu >= nr_cpu_ids)
 26		cpu = cpumask_first(cpu_possible_mask);
 27	return cpu;
 28}
 29
/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	/* Per-cpu list of free nodes ready to be handed out locally */
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}
 35
static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	/* Per-cpu list of nodes handed out but not yet flushed to the LRU */
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}
 40
/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
	/* Paired with bpf_lru_node_clear_ref(); READ_ONCE() avoids a
	 * data race with writers that do not hold the list lock.
	 */
	return READ_ONCE(node->ref);
}
 46
static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
{
	/* Lockless write; paired with READ_ONCE() in bpf_lru_node_is_ref() */
	WRITE_ONCE(node->ref, 0);
}
 51
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	/* Only list types below NR_BPF_LRU_LIST_COUNT have a counter */
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}
 58
static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	/* Only list types below NR_BPF_LRU_LIST_COUNT have a counter */
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}
 65
/* Detach @node from its active/inactive list and put it on @free_list,
 * retagging it as @tgt_free_type.  Local-list nodes must never reach
 * this function.
 */
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the removing node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}
 85
/* Move nodes from local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	/* Source must be a local-list type, target must not be */
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	bpf_lru_node_clear_ref(node);	/* enters the LRU unreferenced */
	list_move(&node->list, &l->lists[tgt_type]);
}
100
/* Move nodes between or within active and inactive list (like
 * active to inactive, inactive to active or tail of active back to
 * the head of active).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	bpf_lru_node_clear_ref(node);	/* a move always consumes the ref bit */

	/* If the moving node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}
128
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	/* "Low" means the inactive list is outnumbered by the active
	 * list; the caller then rotates the active list to refill it.
	 */
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}
134
/* Rotate the active list:
 * 1. Start from tail
 * 2. If the node has the ref bit set, it will be rotated
 *    back to the head of active list with the ref bit cleared.
 *    Give this node one more chance to survive in the active list.
 * 3. If the ref bit is not set, move it to the head of the
 *    inactive list.
 * 4. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		/* Referenced nodes are re-queued at the head, so reaching
		 * the old head node means the whole list has been seen.
		 */
		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}
162
/* Rotate the inactive list.  It starts from the next_inactive_rotation
 * 1. If the node has ref bit set, it will be moved to the head
 *    of active list with the ref bit cleared.
 * 2. If the node does not have ref bit set, it will leave it
 *    at its current location (i.e. do nothing) so that it can
 *    be considered during the next inactive_shrink.
 * 3. It will at most scan nr_scans nodes
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	/* last: the final entry to visit this round — one past the
	 * saved rotation point, skipping the list head sentinel.
	 */
	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			/* Skip the list head sentinel */
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;	/* saved before a possible move */
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	/* Resume from here on the next rotation */
	l->next_inactive_rotation = next;
}
205
/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only move the nodes without the ref bit
 * set to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;

	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
		if (bpf_lru_node_is_ref(node)) {
			/* Referenced node: promote instead of freeing */
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else if (lru->del_from_htab(lru->del_arg, node)) {
			/* The htab released it; safe to recycle */
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			if (++nshrinked == tgt_nshrink)
				break;
		}

		if (++i == lru->nr_scans)
			break;
	}

	return nshrinked;
}
238
/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	/* Only refill the inactive list from the active list when the
	 * inactive list has become the smaller of the two.
	 */
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}
249
/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive().  It will just remove
 * one node from either inactive or active list without
 * honoring the ref-bit.  It prefers inactive list to active
 * list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)

{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		/* Free exactly one node as soon as the htab agrees to
		 * release it.
		 */
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}
293
/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		/* Nodes referenced while pending enter the active list;
		 * untouched ones enter the inactive list.
		 */
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}
309
/* Return @node to the free list of the global LRU list @l. */
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}
322
/* Refill this cpu's local free list from the global LRU list.
 * Called with loc_l->lock held and interrupts already disabled by the
 * caller (see bpf_common_lru_pop_free()), hence the plain
 * raw_spin_lock() on the global list lock.
 */
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	/* Give this cpu's pending nodes back to the global list first */
	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	/* Grab up to LOCAL_FREE_TARGET nodes from the global free list */
	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	/* Not enough free nodes: shrink the LRU lists for the rest */
	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}
351
/* Hand @node out on @cpu: record its hash and owner cpu, then queue it
 * on the local pending list as BPF_LRU_LOCAL_LIST_T_PENDING.
 */
static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	/* The hash lives at a caller-configured offset inside the node */
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	bpf_lru_node_clear_ref(node);
	list_add(&node->list, local_pending_list(loc_l));
}
364
/* Pop one node from the local free list, or NULL if it is empty. */
static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;

	node = list_first_entry_or_null(local_free_list(loc_l),
					struct bpf_lru_node,
					list);
	if (node)
		list_del(&node->list);

	return node;
}
378
/* Reclaim one node from the local pending list.  The first pass skips
 * nodes with the ref bit set; if nothing could be reclaimed, a second
 * pass ignores the ref bit.  Returns NULL when the htab releases no
 * node at all.
 */
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;
	bool force = false;

ignore_ref:
	/* Get from the tail (i.e. older element) of the pending list. */
	list_for_each_entry_reverse(node, local_pending_list(loc_l),
				    list) {
		if ((!bpf_lru_node_is_ref(node) || force) &&
		    lru->del_from_htab(lru->del_arg, node)) {
			list_del(&node->list);
			return node;
		}
	}

	if (!force) {
		force = true;
		goto ignore_ref;
	}

	return NULL;
}
403
404static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
405						    u32 hash)
406{
407	struct list_head *free_list;
408	struct bpf_lru_node *node = NULL;
409	struct bpf_lru_list *l;
410	unsigned long flags;
411	int cpu = raw_smp_processor_id();
412
413	l = per_cpu_ptr(lru->percpu_lru, cpu);
414
415	raw_spin_lock_irqsave(&l->lock, flags);
416
417	__bpf_lru_list_rotate(lru, l);
418
419	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
420	if (list_empty(free_list))
421		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
422				      BPF_LRU_LIST_T_FREE);
423
424	if (!list_empty(free_list)) {
425		node = list_first_entry(free_list, struct bpf_lru_node, list);
426		*(u32 *)((void *)node + lru->hash_offset) = hash;
427		bpf_lru_node_clear_ref(node);
428		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
429	}
430
431	raw_spin_unlock_irqrestore(&l->lock, flags);
432
433	return node;
434}
435
/* Pop a free node from the common (shared) LRU and record @hash in it.
 *
 * Fast path: take a node from this CPU's local free list, refilling it
 * from the global list when empty.  If that yields nothing, steal from
 * other CPUs' local free/pending lists in round-robin order.  The node
 * handed out is placed on this CPU's local pending list.
 */
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free nodes found from the local free list and
	 * the global LRU list.
	 *
	 * Steal from the local free/pending list of the
	 * current CPU and remote CPU in RR.  It starts
	 * with the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		/* Only one local lock is held at a time: loc_l->lock
		 * was dropped above before taking the victim's lock.
		 */
		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	/* Resume stealing from the next CPU on the following miss. */
	loc_l->next_steal = steal;

	if (node) {
		/* Re-acquire our own lock to publish the stolen node
		 * on this CPU's pending list.
		 */
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}
498
499struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
500{
501	if (lru->percpu)
502		return bpf_percpu_lru_pop_free(lru, hash);
503	else
504		return bpf_common_lru_pop_free(lru, hash);
505}
506
/* Return @node to the common LRU.
 *
 * A node still on its owner CPU's local pending list is moved to that
 * CPU's local free list; any other node is pushed onto the global
 * free list.
 */
static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	/* node->type may change concurrently (e.g. a pending node can
	 * be stolen by __local_list_pop_pending() on another CPU),
	 * hence the READ_ONCE() here and the re-check under the lock
	 * below.
	 */
	u8 node_type = READ_ONCE(node->type);
	unsigned long flags;

	/* Pushing an already-free node is a caller bug. */
	if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

		/* Lost the race: the node left the pending list before
		 * we took the lock.  Fall back to the global list.
		 */
		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		bpf_lru_node_clear_ref(node);
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}
540
541static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
542				     struct bpf_lru_node *node)
543{
544	struct bpf_lru_list *l;
545	unsigned long flags;
546
547	l = per_cpu_ptr(lru->percpu_lru, node->cpu);
548
549	raw_spin_lock_irqsave(&l->lock, flags);
550
551	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
552
553	raw_spin_unlock_irqrestore(&l->lock, flags);
554}
555
556void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
557{
558	if (lru->percpu)
559		bpf_percpu_lru_push_free(lru, node);
560	else
561		bpf_common_lru_push_free(lru, node);
562}
563
564static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
565				    u32 node_offset, u32 elem_size,
566				    u32 nr_elems)
567{
568	struct bpf_lru_list *l = &lru->common_lru.lru_list;
569	u32 i;
570
571	for (i = 0; i < nr_elems; i++) {
572		struct bpf_lru_node *node;
573
574		node = (struct bpf_lru_node *)(buf + node_offset);
575		node->type = BPF_LRU_LIST_T_FREE;
576		bpf_lru_node_clear_ref(node);
577		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
578		buf += elem_size;
579	}
580}
581
582static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
583				    u32 node_offset, u32 elem_size,
584				    u32 nr_elems)
585{
586	u32 i, pcpu_entries;
587	int cpu;
588	struct bpf_lru_list *l;
589
590	pcpu_entries = nr_elems / num_possible_cpus();
591
592	i = 0;
593
594	for_each_possible_cpu(cpu) {
595		struct bpf_lru_node *node;
596
597		l = per_cpu_ptr(lru->percpu_lru, cpu);
598again:
599		node = (struct bpf_lru_node *)(buf + node_offset);
600		node->cpu = cpu;
601		node->type = BPF_LRU_LIST_T_FREE;
602		bpf_lru_node_clear_ref(node);
603		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
604		i++;
605		buf += elem_size;
606		if (i == nr_elems)
607			break;
608		if (i % pcpu_entries)
609			goto again;
610	}
611}
612
613void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
614		      u32 elem_size, u32 nr_elems)
615{
616	if (lru->percpu)
617		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
618					nr_elems);
619	else
620		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
621					nr_elems);
622}
623
624static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
625{
626	int i;
627
628	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
629		INIT_LIST_HEAD(&loc_l->lists[i]);
630
631	loc_l->next_steal = cpu;
632
633	raw_spin_lock_init(&loc_l->lock);
634}
635
636static void bpf_lru_list_init(struct bpf_lru_list *l)
637{
638	int i;
639
640	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
641		INIT_LIST_HEAD(&l->lists[i]);
642
643	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
644		l->counts[i] = 0;
645
646	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];
647
648	raw_spin_lock_init(&l->lock);
649}
650
651int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
652		 del_from_htab_func del_from_htab, void *del_arg)
653{
654	int cpu;
655
656	if (percpu) {
657		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
658		if (!lru->percpu_lru)
659			return -ENOMEM;
660
661		for_each_possible_cpu(cpu) {
662			struct bpf_lru_list *l;
663
664			l = per_cpu_ptr(lru->percpu_lru, cpu);
665			bpf_lru_list_init(l);
666		}
667		lru->nr_scans = PERCPU_NR_SCANS;
668	} else {
669		struct bpf_common_lru *clru = &lru->common_lru;
670
671		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
672		if (!clru->local_list)
673			return -ENOMEM;
674
675		for_each_possible_cpu(cpu) {
676			struct bpf_lru_locallist *loc_l;
677
678			loc_l = per_cpu_ptr(clru->local_list, cpu);
679			bpf_lru_locallist_init(loc_l, cpu);
680		}
681
682		bpf_lru_list_init(&clru->lru_list);
683		lru->nr_scans = LOCAL_NR_SCANS;
684	}
685
686	lru->percpu = percpu;
687	lru->del_from_htab = del_from_htab;
688	lru->del_arg = del_arg;
689	lru->hash_offset = hash_offset;
690
691	return 0;
692}
693
694void bpf_lru_destroy(struct bpf_lru *lru)
695{
696	if (lru->percpu)
697		free_percpu(lru->percpu_lru);
698	else
699		free_percpu(lru->common_lru.local_list);
700}