/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))

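/*
 * Illustrative sketch of how XSK_CHECK_PRIV_TYPE() is meant to be used by a
 * zero-copy driver: the driver wraps struct xdp_buff with its own per-frame
 * context, which must fit into the cb[] scratch area of struct xdp_buff_xsk.
 * The "foo" names below are hypothetical and not part of this header.
 */
struct foo_xdp_buff {
	struct xdp_buff xdp_buff;	/* must be the first member */
	const void *rx_desc;		/* driver-private per-frame state */
};

static inline void foo_assert_priv_fits(void)
{
	/* Build fails if the driver-private part exceeds XSK_PRIV_MAX bytes. */
	XSK_CHECK_PRIV_TYPE(struct foo_xdp_buff);
}
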
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u32 xdp_zc_max_segs;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

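/*
 * Typical control-path flow, sketched from the declarations above: the AF_XDP
 * core creates a pool with xp_create_and_assign_umem(), binds it to a netdev
 * queue with xp_assign_dev() (or xp_assign_dev_shared() when sockets share a
 * umem), takes and drops references with xp_get_pool()/xp_put_pool(), and
 * finally unbinds and frees it with xp_clear_dev() and xp_destroy().
 */
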
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

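/*
 * Reading the two init helpers above: "addr" is the buffer's address within
 * the umem, so data_hard_start points at that buffer plus the pool headroom in
 * the kernel mapping (pool->addrs). On the DMA side, frame_dma is the bus
 * address of the buffer (the page's DMA address with the contiguity flag bit
 * masked off, plus the in-page offset), and dma is where packet data starts:
 * frame_dma plus the pool headroom plus XDP_PACKET_HEADROOM.
 */
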
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
				xskb->pool->frame_len,
				DMA_BIDIRECTIONAL);
}

static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}

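/*
 * Illustrative Rx zero-copy sketch built only on the helpers above; the "foo"
 * functions are hypothetical driver code, not part of this header. A driver
 * allocates a buffer, posts its DMA address to the hardware, and on completion
 * syncs for the CPU before running XDP; a dropped frame is recycled with
 * xp_free().
 */
static inline dma_addr_t foo_rx_alloc(struct xsk_buff_pool *pool,
				      struct xdp_buff **xdp)
{
	*xdp = xp_alloc(pool);
	if (!*xdp)
		return 0;

	/* DMA address of the packet data area, written to an Rx descriptor. */
	return xp_get_dma(container_of(*xdp, struct xdp_buff_xsk, xdp));
}

static inline void foo_rx_drop(struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
	xdp->data_end = xdp->data + len;
	/* An XDP verdict would be taken here; on drop, return the buffer. */
	xp_free(xskb);
}
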
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

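/*
 * In other words: a descriptor is only problematic if it spans a page boundary
 * and the DMA mapping marks the next page as not contiguous (the
 * XSK_NEXT_PG_CONTIG_MASK bit is clear in the page's dma_pages[] entry).
 */
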
static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}

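/*
 * Worked example of the unaligned encoding, using the constants from
 * <linux/if_xdp.h> (XSK_UNALIGNED_BUF_OFFSET_SHIFT is 48): for a descriptor
 * address of 0x0002000000001000, the low 48 bits (0x1000) are the base address
 * of the buffer in the umem and the upper 16 bits (0x2) are the offset of the
 * data within it, so xp_unaligned_add_offset_to_addr() returns 0x1002.
 */
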
static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
				struct xsk_buff_pool *pool)
{
	u64 orig_addr = xskb->xdp.data - pool->addrs;
	u64 offset;

	if (!pool->unaligned)
		return orig_addr;

	offset = xskb->xdp.data - xskb->xdp.data_hard_start;
	orig_addr -= offset;
	offset += pool->headroom;
	return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

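/*
 * xp_get_handle() builds the address posted back on the Rx ring: in aligned
 * mode it is simply the offset of xdp.data within the umem, while in unaligned
 * mode the offset of the packet data within its buffer is re-encoded above
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT, mirroring the extract helpers further up.
 */
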
static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */