/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
        struct xdp_buff xdp;
        u8 cb[XSK_PRIV_MAX];
        dma_addr_t dma;
        dma_addr_t frame_dma;
        struct xsk_buff_pool *pool;
        struct list_head list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
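
/* Illustrative sketch (hypothetical driver code, not part of this header):
 * a zero-copy driver may wrap struct xdp_buff and keep private per-buffer
 * state that overlays the cb[] scratch area, as long as it fits:
 *
 *        struct mydrv_xdp_buff {
 *                struct xdp_buff xdp;        // must be first
 *                u64 rx_timestamp;           // hypothetical private field
 *        };
 *
 *        XSK_CHECK_PRIV_TYPE(struct mydrv_xdp_buff);
 *
 * The BUILD_BUG_ON() fires at compile time if the wrapped type would spill
 * past the end of xdp_buff_xsk::cb.
 */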

struct xsk_dma_map {
        dma_addr_t *dma_pages;
        struct device *dev;
        struct net_device *netdev;
        refcount_t users;
        struct list_head list; /* Protected by the RTNL_LOCK */
        u32 dma_pages_cnt;
};

struct xsk_buff_pool {
        /* Members used only in the control path come first. */
        struct device *dev;
        struct net_device *netdev;
        struct list_head xsk_tx_list;
        /* Protects modifications to the xsk_tx_list */
        spinlock_t xsk_tx_list_lock;
        refcount_t users;
        struct xdp_umem *umem;
        struct work_struct work;
        struct list_head free_list;
        struct list_head xskb_list;
        u32 heads_cnt;
        u16 queue_id;

        /* Data path members, kept as close to free_heads at the end as possible. */
        struct xsk_queue *fq ____cacheline_aligned_in_smp;
        struct xsk_queue *cq;
        /* For performance reasons, each buff pool has its own array of dma_pages
         * even when they are identical.
         */
        dma_addr_t *dma_pages;
        struct xdp_buff_xsk *heads;
        struct xdp_desc *tx_descs;
        u64 chunk_mask;
        u64 addrs_cnt;
        u32 free_list_cnt;
        u32 dma_pages_cnt;
        u32 free_heads_cnt;
        u32 headroom;
        u32 chunk_size;
        u32 chunk_shift;
        u32 frame_len;
        u32 xdp_zc_max_segs;
        u8 tx_metadata_len; /* inherited from umem */
        u8 cached_need_wakeup;
        bool uses_need_wakeup;
        bool unaligned;
        bool tx_sw_csum;
        void *addrs;
        /* Mutual exclusion of the completion ring in SKB mode. Two cases to
         * protect: the NAPI TX thread racing with sendmsg() error paths in the
         * skb destructor callback, and multiple sockets sharing a single cq
         * for the same netdev and queue id.
         */
        spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
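
/* Sketch of how the contiguity flag is encoded and read (see
 * xp_desc_crosses_non_contig_pg() below). Because umem pages are PAGE_SIZE
 * aligned, bit 0 of each dma_pages[] entry is free to mean "the next page is
 * physically contiguous with this one":
 *
 *        dma_addr_t entry = pool->dma_pages[addr >> PAGE_SHIFT];
 *        bool next_pg_contig = entry & XSK_NEXT_PG_CONTIG_MASK;
 *        dma_addr_t page_dma = entry & ~XSK_NEXT_PG_CONTIG_MASK;
 */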

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                     u64 addr)
{
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                    dma_addr_t *dma_pages, u64 addr)
{
        xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
                          (addr & ~PAGE_MASK);
        xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
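
/* Worked example (assuming PAGE_SIZE == 4096): for addr = 0x1a10,
 * addr >> PAGE_SHIFT selects dma_pages[1]; clearing the contiguity flag bit
 * and adding the in-page offset (addr & ~PAGE_MASK == 0xa10) yields frame_dma.
 * xskb->dma then points pool->headroom + XDP_PACKET_HEADROOM bytes further,
 * where packet data is placed.
 */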

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
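
/* Typical zero-copy driver flow (illustrative sketch only; real drivers use
 * the xsk_* wrappers in include/net/xdp_sock_drv.h rather than calling these
 * directly):
 *
 *        err = xp_dma_map(pool, dev, attrs, pages, nr_pages);
 *        xp_set_rxq_info(pool, rxq);
 *        xdp = xp_alloc(pool);          // post xdp->data buffers to the RX ring
 *        ...
 *        xp_dma_unmap(pool, attrs);     // teardown
 */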
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->frame_dma;
}

static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
        dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
                                xskb->pool->frame_len,
                                DMA_BIDIRECTIONAL);
}

static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
{
        dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
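
/* Note: on cache-coherent systems the generic dma_sync_single_for_*() calls
 * above are effectively no-ops, so no pool-private "need sync" flag is kept
 * here; the DMA core already short-circuits the sync when it is not needed.
 */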
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                                 u64 addr, u32 len)
{
        bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

        if (likely(!cross_pg))
                return false;

        return pool->dma_pages &&
               !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline bool xp_mb_desc(struct xdp_desc *desc)
{
        return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
        return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
        return xp_unaligned_extract_addr(addr) +
               xp_unaligned_extract_offset(addr);
}
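
/* Example: in unaligned mode a descriptor address packs the buffer address in
 * the low 48 bits and a data offset in the high 16 bits (see if_xdp.h, where
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT is 48). For
 * addr = (512ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x4000:
 *
 *        xp_unaligned_extract_addr(addr)       == 0x4000
 *        xp_unaligned_extract_offset(addr)     == 512
 *        xp_unaligned_add_offset_to_addr(addr) == 0x4200
 */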

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
        return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
        if (xskb->pool->unaligned)
                xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
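
/* For aligned pools xp_release() is a no-op: a chunk address maps back to its
 * head deterministically via xp_aligned_extract_idx(), so heads never leave
 * the pool. Only unaligned pools recycle heads through free_heads[].
 */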

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
                                struct xsk_buff_pool *pool)
{
        u64 orig_addr = xskb->xdp.data - pool->addrs;
        u64 offset;

        if (!pool->unaligned)
                return orig_addr;

        offset = xskb->xdp.data - xskb->xdp.data_hard_start;
        orig_addr -= offset;
        offset += pool->headroom;
        orig_addr -= pool->headroom;
        return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
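
/* The returned handle encodes, for unaligned pools, the buffer's base address
 * in bits 0-47 and the data offset (including pool->headroom) in bits 48-63,
 * so base + offset reconstructs xdp->data's position within the umem. Note the
 * headroom must be subtracted from the base because data_hard_start (set in
 * xp_init_xskb_addr() above) already includes it.
 */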

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
        return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */