Linux kernel header include/drm/spsc_queue.h, captured from an online code
browser (navigation chrome and in-line line numbers are scrape artifacts).
The first copy below is the file as of Linux v5.4.
  1/*
  2 * Copyright 2017 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
 25#define DRM_SCHEDULER_SPSC_QUEUE_H_
 26
 27#include <linux/atomic.h>
 28#include <linux/preempt.h>
 29
 30/** SPSC lockless queue */
 31
/* Embedded queue link; place one of these inside each object to be queued. */
struct spsc_node {

	/* Stores spsc_node*: the next (newer) entry, NULL for the tail entry. */
	struct spsc_node *next;
};
 37
/*
 * Single-producer/single-consumer lockless FIFO.
 *
 * Invariant: @tail holds the address of the slot the next pushed node
 * must be linked into — either &head (queue empty) or &last_node->next.
 */
struct spsc_queue {

	/* Oldest entry, consumed by spsc_queue_pop(); NULL when empty. */
	 struct spsc_node *head;

	/* atomic pointer to struct spsc_node* */
	atomic_long_t tail;

	/* Number of queued entries, reported by spsc_queue_count(). */
	atomic_t job_count;
};
 47
 48static inline void spsc_queue_init(struct spsc_queue *queue)
 49{
 50	queue->head = NULL;
 51	atomic_long_set(&queue->tail, (long)&queue->head);
 52	atomic_set(&queue->job_count, 0);
 53}
 54
 55static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
 56{
 57	return queue->head;
 58}
 59
 60static inline int spsc_queue_count(struct spsc_queue *queue)
 61{
 62	return atomic_read(&queue->job_count);
 63}
 64
/**
 * spsc_queue_push - append @node at the tail of @queue (producer side)
 * @queue: queue to append to
 * @node: entry to append; its @next link is overwritten here
 *
 * Returns true if the queue was empty before this push, i.e. the caller
 * inserted the first element and should wake the consumer.
 *
 * Single producer only: the atomic xchg claims the tail slot, then the
 * previous tail slot is pointed at @node.
 */
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	node->next = NULL;

	/*
	 * Keep the window between publishing the new tail and linking it
	 * from the old tail short: the consumer spin-waits for that link
	 * in spsc_queue_pop()'s slowpath.
	 */
	preempt_disable();

	/* Swing tail to &node->next; the old slot comes back for linking. */
	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * In case of first element verify new node will be visible to the consumer
	 * thread when we ping the kernel thread that there is new work to do.
	 */
	smp_wmb();

	preempt_enable();

	/* The old slot was &queue->head iff the queue was empty before. */
	return tail == &queue->head;
}
 87
 88
/**
 * spsc_queue_pop - remove and return the oldest node (consumer side)
 * @queue: queue to pop from
 *
 * Returns the dequeued node, or NULL if the queue is empty.  Single
 * consumer only; safe against a concurrent spsc_queue_push().
 */
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/* Verify reading from memory and not the cache */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);

	if (unlikely(!next)) {
		/* slowpath for the last element in the queue */

		/*
		 * Try to reset tail back to &queue->head.  Failure means a
		 * producer already xchg'd tail to a new node but may not yet
		 * have stored the link into node->next.
		 */
		if (atomic_long_cmpxchg(&queue->tail,
				(long)&node->next, (long) &queue->head) != (long)&node->next) {
			/* Updating tail failed wait for new next to appear */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}
119
120
121
122#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
The following duplicate copy is the same header as of Linux v6.13.7; it is
byte-identical to the v5.4 version above.
  1/*
  2 * Copyright 2017 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
 25#define DRM_SCHEDULER_SPSC_QUEUE_H_
 26
 27#include <linux/atomic.h>
 28#include <linux/preempt.h>
 29
 30/** SPSC lockless queue */
 31
/* Embedded queue link; place one of these inside each object to be queued. */
struct spsc_node {

	/* Stores spsc_node*: the next (newer) entry, NULL for the tail entry. */
	struct spsc_node *next;
};
 37
/*
 * Single-producer/single-consumer lockless FIFO.
 *
 * Invariant: @tail holds the address of the slot the next pushed node
 * must be linked into — either &head (queue empty) or &last_node->next.
 */
struct spsc_queue {

	/* Oldest entry, consumed by spsc_queue_pop(); NULL when empty. */
	 struct spsc_node *head;

	/* atomic pointer to struct spsc_node* */
	atomic_long_t tail;

	/* Number of queued entries, reported by spsc_queue_count(). */
	atomic_t job_count;
};
 47
 48static inline void spsc_queue_init(struct spsc_queue *queue)
 49{
 50	queue->head = NULL;
 51	atomic_long_set(&queue->tail, (long)&queue->head);
 52	atomic_set(&queue->job_count, 0);
 53}
 54
 55static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
 56{
 57	return queue->head;
 58}
 59
 60static inline int spsc_queue_count(struct spsc_queue *queue)
 61{
 62	return atomic_read(&queue->job_count);
 63}
 64
/**
 * spsc_queue_push - append @node at the tail of @queue (producer side)
 * @queue: queue to append to
 * @node: entry to append; its @next link is overwritten here
 *
 * Returns true if the queue was empty before this push, i.e. the caller
 * inserted the first element and should wake the consumer.
 *
 * Single producer only: the atomic xchg claims the tail slot, then the
 * previous tail slot is pointed at @node.
 */
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	node->next = NULL;

	/*
	 * Keep the window between publishing the new tail and linking it
	 * from the old tail short: the consumer spin-waits for that link
	 * in spsc_queue_pop()'s slowpath.
	 */
	preempt_disable();

	/* Swing tail to &node->next; the old slot comes back for linking. */
	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * In case of first element verify new node will be visible to the consumer
	 * thread when we ping the kernel thread that there is new work to do.
	 */
	smp_wmb();

	preempt_enable();

	/* The old slot was &queue->head iff the queue was empty before. */
	return tail == &queue->head;
}
 87
 88
/**
 * spsc_queue_pop - remove and return the oldest node (consumer side)
 * @queue: queue to pop from
 *
 * Returns the dequeued node, or NULL if the queue is empty.  Single
 * consumer only; safe against a concurrent spsc_queue_push().
 */
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/* Verify reading from memory and not the cache */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);

	if (unlikely(!next)) {
		/* slowpath for the last element in the queue */

		/*
		 * Try to reset tail back to &queue->head.  Failure means a
		 * producer already xchg'd tail to a new node but may not yet
		 * have stored the link into node->next.
		 */
		if (atomic_long_cmpxchg(&queue->tail,
				(long)&node->next, (long) &queue->head) != (long)&node->next) {
			/* Updating tail failed wait for new next to appear */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}
119
120
121
122#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */