/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef VDO_FUNNEL_QUEUE_H
#define VDO_FUNNEL_QUEUE_H

#include <linux/atomic.h>
#include <linux/cache.h>

/*
 * A funnel queue is a simple (almost) lock-free queue that accepts entries from multiple threads
 * (multi-producer) and delivers them to a single thread (single-consumer). "Funnel" is an attempt
 * to evoke the image of requests from more than one producer being "funneled down" to a single
 * consumer.
 *
 * This is an unsynchronized but thread-safe data structure when used as intended. There is no
 * mechanism to ensure that only one thread is consuming from the queue. If more than one thread
 * attempts to consume from the queue, the resulting behavior is undefined. Clients must not
 * directly access or manipulate the internals of the queue, which are only exposed for the purpose
 * of allowing the very simple enqueue operation to be inlined.
 *
 * The implementation requires that a funnel_queue_entry structure (a link pointer) is embedded in
 * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
 * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
 * in the queue must all be the same so the client can derive their structure pointer from the
 * entry pointer returned by vdo_funnel_queue_poll().
 *
 * Callers are wholly responsible for allocating and freeing the entries. Entries may be freed as
 * soon as they are returned since this queue is not susceptible to the "ABA problem" present in
 * many lock-free data structures. The queue is dynamically allocated to ensure cache-line
 * alignment, but no other dynamic allocation is used.
 *
 * The algorithm is not actually 100% lock-free. There is a single point in vdo_funnel_queue_put()
 * at which a preempted producer will prevent the consumers from seeing items added to the queue by
 * later producers, and only if the queue is short enough or the consumer fast enough for it to
 * reach what was the end of the queue at the time of the preemption.
 *
 * The consumer function, vdo_funnel_queue_poll(), will return NULL when the queue is empty. To
 * wait for data to consume, spin (if safe) or combine the queue with a struct event_count to
 * signal the presence of new entries.
 */

/* This queue link structure must be embedded in client entries. */
struct funnel_queue_entry {
	/* The next (newer) entry in the queue. */
	struct funnel_queue_entry *next;
};
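
/*
 * As a minimal sketch of the embedding requirement described above: a client record embeds the
 * link by value, at the same offset in every record placed on a given queue. The structure below
 * is hypothetical ("my_work_item" and "payload" are not part of this header) and only illustrates
 * the intended layout.
 */
struct my_work_item {
	/* The link owned by the funnel queue while the record is enqueued. */
	struct funnel_queue_entry entry;
	/* Client payload carried alongside the link. */
	int payload;
};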

/*
 * The dynamically allocated queue structure, which is allocated on a cache line boundary so the
 * producer and consumer fields in the structure will land on separate cache lines. This should be
 * considered opaque, but it is exposed here so vdo_funnel_queue_put() can be inlined.
 */
struct __aligned(L1_CACHE_BYTES) funnel_queue {
	/*
	 * The producers' end of the queue, an atomically exchanged pointer that will never be
	 * NULL.
	 */
	struct funnel_queue_entry *newest;

	/* The consumer's end of the queue, which is owned by the consumer and never NULL. */
	struct funnel_queue_entry *oldest __aligned(L1_CACHE_BYTES);

	/* A dummy entry used to provide the non-NULL invariants above. */
	struct funnel_queue_entry stub;
};

int __must_check vdo_make_funnel_queue(struct funnel_queue **queue_ptr);

void vdo_free_funnel_queue(struct funnel_queue *queue);

/*
 * Put an entry on the end of the queue.
 *
 * The entry pointer must be to the struct funnel_queue_entry embedded in the caller's data
 * structure. The caller must be able to derive the address of the start of their data structure
 * from the pointer passed in here, so every entry in the queue must have the struct
 * funnel_queue_entry at the same offset within the client's structure.
 */
static inline void vdo_funnel_queue_put(struct funnel_queue *queue,
					struct funnel_queue_entry *entry)
{
	struct funnel_queue_entry *previous;

	/*
	 * Barrier requirements: All stores relating to the entry ("next" pointer, containing data
	 * structure fields) must happen before the previous->next store making it visible to the
	 * consumer. Also, the entry's "next" field initialization to NULL must happen before any
	 * other producer threads can see the entry (the xchg) and try to update the "next" field.
	 *
	 * xchg implements a full barrier.
	 */
	WRITE_ONCE(entry->next, NULL);
	previous = xchg(&queue->newest, entry);
	/*
	 * Preemptions between these two statements hide the rest of the queue from the consumer,
	 * preventing consumption until the following assignment runs.
	 */
	WRITE_ONCE(previous->next, entry);
}
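
/*
 * A producer-side sketch using the hypothetical my_work_item above ("my_submit" is illustrative,
 * not part of this interface): fill in the record first, then publish only the embedded link.
 * The xchg() inside vdo_funnel_queue_put() orders the payload stores before the entry becomes
 * visible to the consumer.
 */
static inline void my_submit(struct funnel_queue *queue, struct my_work_item *item, int payload)
{
	/* All stores to the record happen before vdo_funnel_queue_put() publishes it. */
	item->payload = payload;
	vdo_funnel_queue_put(queue, &item->entry);
}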

struct funnel_queue_entry *__must_check vdo_funnel_queue_poll(struct funnel_queue *queue);
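
/*
 * A consumer-side sketch ("my_poll" is illustrative only): convert the link returned by
 * vdo_funnel_queue_poll() back into the hypothetical my_work_item. This assumes container_of()
 * is visible, e.g. via <linux/container_of.h>, which this header does not include itself.
 */
static inline struct my_work_item *my_poll(struct funnel_queue *queue)
{
	/* A NULL return means the queue is currently empty. */
	struct funnel_queue_entry *entry = vdo_funnel_queue_poll(queue);

	/* container_of() is assumed available; the member name must match the embedded link. */
	return entry == NULL ? NULL : container_of(entry, struct my_work_item, entry);
}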

bool __must_check vdo_is_funnel_queue_empty(struct funnel_queue *queue);

bool __must_check vdo_is_funnel_queue_idle(struct funnel_queue *queue);
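
/*
 * A consumer drain-loop sketch tying the pieces together, per the overview comment: poll until
 * NULL, then either spin (if safe) or pair the queue with a struct event_count to wait for new
 * entries. "my_drain" and the my_process callback are hypothetical.
 */
static inline void my_drain(struct funnel_queue *queue,
			    void (*my_process)(struct my_work_item *item))
{
	struct my_work_item *item;

	while ((item = my_poll(queue)) != NULL) {
		/* The record may be processed and freed immediately; there is no ABA hazard. */
		my_process(item);
	}
}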

#endif /* VDO_FUNNEL_QUEUE_H */