[Page chrome from the source-browser capture: "Linux Audio" site header, training-course banner, "Loading..." placeholder.]

Capture of kernel/bpf/percpu_freelist.c (BPF per-CPU freelist) at kernel tag v5.4:
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
  3 */
  4#include "percpu_freelist.h"
  5
/* Allocate the per-CPU freelist heads and initialize each head's raw
 * spinlock and (empty) list.
 *
 * Returns 0 on success, -ENOMEM if the per-CPU allocation fails.
 * Nodes are added later via pcpu_freelist_populate()/push().
 */
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}
 22
/* Release the per-CPU freelist heads.  Only the head storage is freed;
 * any nodes still linked on the lists are not touched here — their
 * backing memory is owned by the caller.
 */
void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}
 27
 
 
 
 
 
 
 
/* Push @node at the front of @head's singly linked list, serialized by
 * head->lock.  Callers in this file disable interrupts around this
 * (see pcpu_freelist_push() and pcpu_freelist_populate()).
 */
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}
 36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 37void __pcpu_freelist_push(struct pcpu_freelist *s,
 38			struct pcpu_freelist_node *node)
 39{
 40	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 41
 42	___pcpu_freelist_push(head, node);
 
 43}
 44
/* Push @node onto the current CPU's list, disabling local interrupts so
 * the raw spinlock taken in ___pcpu_freelist_push() is irq-safe.
 */
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}
 54
/* Carve @buf — an array of @nr_elems elements, each @elem_size bytes —
 * into freelist nodes and spread them across the per-CPU lists, giving
 * each CPU runs of pcpu_entries consecutive elements.
 *
 * Each element's pointer is pushed as a struct pcpu_freelist_node, so
 * elements are assumed to begin with one — TODO confirm against callers.
 */
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int i, cpu, pcpu_entries;

	/* elements per CPU, rounded up so all nr_elems get consumed */
	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	/* disable irq to workaround lockdep false positive
	 * in bpf usage pcpu_freelist_populate() will never race
	 * with pcpu_freelist_push()
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		___pcpu_freelist_push(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		/* stay on this CPU until it has received pcpu_entries nodes */
		if (i % pcpu_entries)
			goto again;
	}
	local_irq_restore(flags);
}
 83
/* Pop one node, scanning the per-CPU lists starting from the current
 * CPU and wrapping around the possible-CPU mask.  Returns the unlinked
 * node, or NULL after a full round finds every list empty.  Callers
 * disable interrupts around this (see pcpu_freelist_pop()).
 */
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			/* unlink the list head and hand it to the caller */
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		/* advance to the next possible CPU, wrapping at the end */
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			return NULL;
	}
}
108
/* Pop one node with local interrupts disabled for the duration, so the
 * raw spinlocks taken inside __pcpu_freelist_pop() remain irq-safe.
 * Returns NULL when all per-CPU lists are empty.
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *node;
	unsigned long flags;

	local_irq_save(flags);
	node = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return node;
}
Capture of the same file at kernel tag v5.14.15 (this version adds a shared "extralist" and NMI-safe trylock paths):
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
  3 */
  4#include "percpu_freelist.h"
  5
/* Allocate the per-CPU freelist heads and initialize every head's raw
 * spinlock and (empty) list, plus the shared "extralist" used as a
 * fallback by the NMI push/pop paths.
 *
 * Returns 0 on success, -ENOMEM if the per-CPU allocation fails.
 */
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}
 24
/* Release the per-CPU freelist heads.  Only the head storage is freed;
 * nodes still linked on the per-CPU lists or extralist are not touched
 * here — their backing memory is owned by the caller.
 */
void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}
 29
/* Link @node at the front of @head's list.  Lockless by design: the
 * caller either holds head->lock (push paths) or owns the list outright
 * because it is not yet visible (pcpu_freelist_populate()).
 */
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	head->first = node;
}
 36
/* Push @node onto @head under head->lock.  Non-NMI path; callers
 * disable interrupts around this (see pcpu_freelist_push()).
 */
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}
 44
 45static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
 46						struct pcpu_freelist_node *node)
 47{
 48	if (!raw_spin_trylock(&s->extralist.lock))
 49		return false;
 50
 51	pcpu_freelist_push_node(&s->extralist, node);
 52	raw_spin_unlock(&s->extralist.lock);
 53	return true;
 54}
 55
/* NMI-safe push: uses only trylock so an NMI that interrupted a holder
 * of one of these locks cannot deadlock on it.  Starting from the
 * current CPU, try each possible CPU's lock in turn; once a full round
 * completes without success, also try the shared extralist.  Loops
 * until the node lands somewhere.
 */
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		struct pcpu_freelist_head *head;

		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			pcpu_freelist_push_node(head, node);
			raw_spin_unlock(&head->lock);
			return;
		}
		/* advance to the next possible CPU, wrapping at the end */
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;

		/* cannot lock any per cpu lock, try extralist */
		if (cpu == orig_cpu &&
		    pcpu_freelist_try_push_extra(s, node))
			return;
	}
}
 81
 82void __pcpu_freelist_push(struct pcpu_freelist *s,
 83			struct pcpu_freelist_node *node)
 84{
 85	if (in_nmi())
 86		___pcpu_freelist_push_nmi(s, node);
 87	else
 88		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
 89}
 90
/* Push @node with local interrupts disabled so the raw spinlocks taken
 * by __pcpu_freelist_push() remain irq-safe.
 */
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}
100
/* Carve @buf — an array of @nr_elems elements, each @elem_size bytes —
 * into freelist nodes and spread them across the per-CPU lists, giving
 * each CPU runs of pcpu_entries consecutive elements.
 *
 * Each element's pointer is pushed as a struct pcpu_freelist_node, so
 * elements are assumed to begin with one — TODO confirm against callers.
 * No locking or irq games here: the freelist is not yet visible to any
 * other context (see the in-loop comment).
 */
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	int i, cpu, pcpu_entries;

	/* elements per CPU, rounded up so all nr_elems get consumed */
	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		/* No locking required as this is not visible yet. */
		pcpu_freelist_push_node(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		/* stay on this CPU until it has received pcpu_entries nodes */
		if (i % pcpu_entries)
			goto again;
	}
}
123
/* Non-NMI pop: scan the per-CPU lists starting from the current CPU,
 * wrapping around the possible-CPU mask; if a full round finds every
 * per-CPU list empty, fall back to the shared extralist.  Returns the
 * unlinked node or NULL if everything is empty.  Callers disable
 * interrupts around this (see pcpu_freelist_pop()).
 */
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			/* unlink the list head and hand it to the caller */
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		/* advance to the next possible CPU, wrapping at the end */
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* per cpu lists are all empty, try extralist */
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}
156
/* NMI-safe pop: like ___pcpu_freelist_pop() but uses only trylock so an
 * NMI that interrupted a lock holder cannot deadlock.  CPUs whose lock
 * cannot be taken are simply skipped; after one full round the shared
 * extralist is tried (again only via trylock).  May therefore return
 * NULL even when nodes exist but their locks were contended.
 */
static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				/* unlink the list head and return it */
				head->first = node->next;
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
		/* advance to the next possible CPU, wrapping at the end */
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}
192
/* Pop one node, dispatching to the trylock-based variant when running
 * in NMI context.  pcpu_freelist_pop() disables irqs around this.
 */
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	return in_nmi() ? ___pcpu_freelist_pop_nmi(s) :
			  ___pcpu_freelist_pop(s);
}
199
/* Pop one node with local interrupts disabled so the raw spinlocks
 * taken by __pcpu_freelist_pop() remain irq-safe.  Returns NULL when
 * nothing could be popped.
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}