v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the
	 * consumer pointer when the producer pointer is touched, and
	 * vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The shared state of the rings is a simple circular buffer, as
 * outlined in Documentation/core-api/circular-buffers.rst. For the Rx
 * and completion rings, the kernel is the producer and user space is
 * the consumer. For the Tx and fill rings, the kernel is the consumer
 * and user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures the data is written before the
 * producer pointer. If this barrier were missing, the consumer could
 * observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, as speculative loads are
 * not discarded when the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, we do not store it. The dependency
 * orders both of the stores after the load, so no barrier is needed.
 *
 * (D) ensures the load of the data is observed to happen before the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
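
/* Editorial sketch (not part of this header): the (A)-(D) scheme above,
 * expressed with C11 atomics the way a user-space peer of these rings
 * could implement it. my_ring and MY_RING_SIZE are hypothetical names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MY_RING_SIZE 64				/* power of two, like nentries */

struct my_ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint64_t data[MY_RING_SIZE];
};

static bool my_produce(struct my_ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	/* (A): plain load of ->consumer; the branch below provides the
	 * control dependency that orders the stores after it.
	 */
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);

	if (prod - cons == MY_RING_SIZE)
		return false;				/* no room: do not store */
	r->data[prod & (MY_RING_SIZE - 1)] = val;	/* STORE $data */
	/* (B): release makes the data visible before the producer pointer */
	atomic_store_explicit(&r->producer, prod + 1, memory_order_release);
	return true;
}

static bool my_consume(struct my_ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	/* (C): acquire forbids speculating the data load above this load */
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);

	if (cons == prod)
		return false;				/* ring empty */
	*val = r->data[cons & (MY_RING_SIZE - 1)];	/* LOAD $data */
	/* (D): release orders the data load before the consumer pointer store */
	atomic_store_explicit(&r->consumer, cons + 1, memory_order_release);
	return true;
}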

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
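
/* Editorial sketch of how those operations map onto the helpers defined
 * below (locking and the surrounding NAPI context are omitted):
 *
 *	// kernel as producer, e.g. on the Rx ring
 *	if (xskq_prod_reserve_desc(q, addr, len))	// RESERVE + WRITE
 *		return -ENOBUFS;			// ring full
 *	xskq_prod_submit(q);				// SUBMIT to user space
 *
 *	// kernel as consumer, e.g. on the Tx ring
 *	while (xskq_cons_peek_desc(q, &desc, pool)) {	// PEEK + READ
 *		// ... transmit desc ...
 *		xskq_cons_release(q);			// RELEASE (local)
 *	}
 *	__xskq_cons_release(q);				// publish to producer
 */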

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}
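
/* Worked example for the aligned check above (editorial, hypothetical
 * numbers): with a chunk size of 2048 and addrs_cnt = 8192, a descriptor
 * with addr = 2046 and len = 4 gives chunk = 0 but chunk_end = 2048, so
 * it is rejected for crossing a chunk boundary; addr = 9000 is rejected
 * because its chunk base (8192) is not below addrs_cnt.
 */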

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
					    u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}
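
/* Editorial sketch of the pattern the comment above describes: per-entry
 * releases only touch cached_cons, and the shared consumer pointer is
 * published once per batch:
 *
 *	while (budget-- && xskq_cons_peek_desc(q, &desc, pool)) {
 *		// ... process desc ...
 *		xskq_cons_release(q);		// local state only
 *	}
 *	__xskq_cons_release(q);			// one shared-state update
 */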

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}
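
/* Worked example (editorial): the u32 arithmetic above stays correct
 * across pointer wraparound. With nentries = 8, cached_prod = 0x00000002
 * and cached_cons = 0xfffffffe, cached_prod - cached_cons = 4 (mod 2^32),
 * so free_entries = 8 - 4 = 4 even though the raw counter has wrapped.
 */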

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the
	 * consumer pointer when the producer pointer is touched, and
	 * vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The shared state of the rings is a simple circular buffer, as
 * outlined in Documentation/core-api/circular-buffers.rst. For the Rx
 * and completion rings, the kernel is the producer and user space is
 * the consumer. For the Tx and fill rings, the kernel is the consumer
 * and user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures the data is written before the
 * producer pointer. If this barrier were missing, the consumer could
 * observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, as speculative loads are
 * not discarded when the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, we do not store it. The dependency
 * orders both of the stores after the load, so no barrier is needed.
 *
 * (D) ensures the load of the data is observed to happen before the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
					    struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 entries = xskq_cons_nb_entries(q, max);

	return xskq_cons_read_desc_batch(q, descs, pool, entries);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}
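
/* Worked example (editorial, hypothetical values): with nentries = 4,
 * producer = 7 and consumer = 3 differ by exactly 4, so the ring is
 * full; the unsigned subtraction also holds across wraparound, e.g.
 * producer = 1 and consumer = 0xfffffffd likewise differ by 4 (mod 2^32).
 */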

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					       u32 max)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 nb_entries, i, cached_prod;

	nb_entries = xskq_prod_nb_free(q, max);

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;

	return nb_entries;
}
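
/* Editorial sketch of a hypothetical caller batching addresses: the
 * return value may be less than max if the ring lacks space, and only
 * descs[0..nb) were written; pairing with xskq_prod_submit_n() makes
 * exactly those entries visible to the consumer:
 *
 *	u32 nb = xskq_prod_reserve_addr_batch(q, descs, want);
 *	if (nb)
 *		xskq_prod_submit_n(q, nb);
 */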

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */