tools/io_uring/liburing.h (kernel v6.2)
#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);

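/*
 * These wrappers are not defined in this header; they are supplied by the
 * accompanying C code. On libcs without io_uring wrappers they reduce to
 * raw syscall(2) calls. A minimal sketch (assuming <sys/syscall.h>
 * provides the __NR_io_uring_* numbers):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int io_uring_setup(unsigned entries, struct io_uring_params *p)
 *	{
 *		return (int) syscall(__NR_io_uring_setup, entries, p);
 *	}
 */
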
/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);

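/*
 * Typical lifecycle, as a sketch (error handling abbreviated): set up a
 * ring, grab and prepare sqes, submit them, reap cqes, then tear down:
 *
 *	struct io_uring ring;
 *
 *	if (io_uring_queue_init(32, &ring, 0) < 0)
 *		return -1;
 *	// ... io_uring_get_sqe() / io_uring_prep_*() / io_uring_submit() ...
 *	io_uring_queue_exit(&ring);
 */
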
/*
 * Must be called after io_uring_{peek,wait}_cqe(), once the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}

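/*
 * Typical reaping loop, as a sketch: drain whatever completions are
 * ready without blocking, marking each one seen so its cqe slot can be
 * reused (handle_completion() is a hypothetical application handler):
 *
 *	struct io_uring_cqe *cqe;
 *
 *	while (!io_uring_peek_cqe(&ring, &cqe) && cqe) {
 *		handle_completion(cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */
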
/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}

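/*
 * user_data travels from sqe to cqe unchanged, so a per-request context
 * pointer stored at prep time can be recovered at completion time.
 * Sketch (struct my_req is hypothetical):
 *
 *	struct my_req *req = calloc(1, sizeof(*req));
 *
 *	io_uring_sqe_set_data(sqe, req);
 *	// ... later, when the matching cqe arrives ...
 *	struct my_req *done = io_uring_cqe_get_data(cqe);
 */
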
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

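/*
 * Example: queue a single 4 KiB readv at offset 0 (a sketch; error
 * handling omitted, buf and fd assumed valid):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 */
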
static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}

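/*
 * The *_fixed variants operate on buffers registered up front via
 * io_uring_register(2) with IORING_REGISTER_BUFFERS. Note that these
 * helpers leave sqe->buf_index at 0 (from the memset in
 * io_uring_prep_rw()), i.e. the first registered buffer; set buf_index
 * by hand to address the others. Sketch:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0);
 */
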
static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}

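/*
 * Example: arm a one-shot poll for readability (POLLIN from <poll.h>);
 * the completion's cqe->res carries the signaled event mask. Sketch
 * (sockfd and conn are hypothetical):
 *
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	io_uring_sqe_set_data(sqe, conn);
 *	io_uring_submit(&ring);
 */
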
static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}

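/*
 * An fsync_flags of 0 gives fsync(2) semantics; IORING_FSYNC_DATASYNC
 * (from the uapi header included above) gives fdatasync(2) semantics.
 * Sketch:
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */
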
static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif
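
Putting it together, a minimal sketch of driving this interface end to end:
open a file, queue one IORING_OP_READV, wait for its completion, and tear
the ring down. Error handling is abbreviated, and the program assumes it is
built together with this header's companion queue/setup/syscall code.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "liburing.h"

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	char buf[4096];
	int fd, ret;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* one entry is enough for a single in-flight request */
	if (io_uring_queue_init(1, &ring, 0) < 0)
		return 1;

	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);

	/* grab an sqe, describe the read, and hand it to the kernel */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_submit(&ring);

	/* block until the completion arrives; res is the byte count
	 * on success, a negative errno on failure */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret && cqe) {
		printf("read %d bytes\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}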