v4.6
 
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "percpu_freelist.h"

int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
					struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

	__pcpu_freelist_push(head, node);
}

void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	/* disable irq to workaround lockdep false positive
	 * in bpf usage pcpu_freelist_populate() will never race
	 * with pcpu_freelist_push()
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		__pcpu_freelist_push(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
	local_irq_restore(flags);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			return NULL;
	}
}
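
For context, a minimal sketch of how a caller might use the v4.6 API above, assuming normal kernel process context and the declarations from percpu_freelist.h; the element type (my_elem), the array size, and the function below are hypothetical illustrations, not part of the kernel source:

#include <linux/slab.h>
#include "percpu_freelist.h"

/* Hypothetical element type; the freelist node must be the first member
 * because pcpu_freelist_populate() pushes the raw buffer pointer and
 * pcpu_freelist_pop() later returns it as a struct pcpu_freelist_node *. */
struct my_elem {
	struct pcpu_freelist_node fnode;
	u64 payload;
};

static int my_freelist_example(void)
{
	struct pcpu_freelist fl;
	struct pcpu_freelist_node *node;
	struct my_elem *elems;
	int err;

	err = pcpu_freelist_init(&fl);
	if (err)
		return err;

	/* Preallocate a flat array and spread it across the per-CPU lists. */
	elems = kcalloc(128, sizeof(*elems), GFP_KERNEL);
	if (!elems) {
		pcpu_freelist_destroy(&fl);
		return -ENOMEM;
	}
	pcpu_freelist_populate(&fl, elems, sizeof(*elems), 128);

	/* Take an element from the current CPU's list (pop falls back to
	 * stealing from other CPUs if the local list is empty), then
	 * return it to the current CPU's list. */
	node = pcpu_freelist_pop(&fl);
	if (node)
		pcpu_freelist_push(&fl, node);

	pcpu_freelist_destroy(&fl);
	kfree(elems);
	return 0;
}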
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include "percpu_freelist.h"

int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}

void __pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

	___pcpu_freelist_push(head, node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	/* disable irq to workaround lockdep false positive
	 * in bpf usage pcpu_freelist_populate() will never race
	 * with pcpu_freelist_push()
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		___pcpu_freelist_push(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
	local_irq_restore(flags);
}

struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			return NULL;
	}
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}
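
Compared with the v4.6 listing, v5.4 separates the lock-only helpers (___pcpu_freelist_push, __pcpu_freelist_push, __pcpu_freelist_pop) from irq-saving wrappers (pcpu_freelist_push, pcpu_freelist_pop). A minimal sketch of how a caller might choose between the two, assuming kernel context and the declarations from percpu_freelist.h; both helper functions below are hypothetical:

#include "percpu_freelist.h"

/* Ordinary process context: the wrappers disable local interrupts
 * around the per-CPU raw spinlock. */
static void recycle_node(struct pcpu_freelist *fl)
{
	struct pcpu_freelist_node *node = pcpu_freelist_pop(fl);

	if (node)
		pcpu_freelist_push(fl, node);
}

/* Hypothetical context where local interrupts are already disabled:
 * the double-underscore variants avoid a redundant save/restore. */
static void recycle_node_irqs_off(struct pcpu_freelist *fl)
{
	struct pcpu_freelist_node *node = __pcpu_freelist_pop(fl);

	if (node)
		__pcpu_freelist_push(fl, node);
}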