/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;		/* DMA address of the start of the packet data */
	dma_addr_t frame_dma;	/* DMA address of the start of the whole frame */
	struct xsk_buff_pool *pool;
	bool unaligned;
	u64 orig_addr;
	struct list_head free_list_node;
};

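/* Per-umem, per-device DMA mapping state. When a umem is shared by several
 * sockets bound to the same device, its pages are mapped once and the
 * resulting xsk_dma_map is reused, refcounted via users.
 */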
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

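/* Rough lifecycle sketch, an assumption about how the AF_XDP core in
 * net/xdp/xsk.c uses the declarations above (not part of this header):
 *
 *	// bind time: one pool per socket, mapped onto a dev/queue pair
 *	pool = xp_create_and_assign_umem(xs, umem);
 *	err = xp_assign_dev(pool, netdev, queue_id, flags);
 *	// or xp_assign_dev_shared() when a shared umem is bound to another
 *	// dev/queue pair
 *
 *	// further sockets sharing the pool take a reference
 *	xp_get_pool(pool);
 *	xp_add_xsk(pool, xs);
 *
 *	// teardown: the last xp_put_pool() schedules the deferred release
 *	// that unbinds the device and frees the pool
 *	xp_del_xsk(pool, xs);
 *	xp_put_pool(pool);
 */
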
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
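
/* Hedged usage sketch for a zero-copy driver RX path (the ring handling
 * helpers are hypothetical; real drivers go through the xsk_buff_* wrappers
 * in include/net/xdp_sock_drv.h, which boil down to the xp_* calls above):
 *
 *	struct xdp_buff *xdp = xp_alloc(pool);
 *	struct xdp_buff_xsk *xskb;
 *
 *	if (!xdp)
 *		return -ENOMEM;
 *	xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *	post_to_hw_rx_ring(xp_get_dma(xskb));	// hypothetical helper
 *	...
 *	// once the NIC has DMA-ed the frame in:
 *	xp_dma_sync_for_cpu(xskb);	// no-op when no syncing is needed
 *	run_xdp_prog(xdp);		// hypothetical; on drop or error:
 *	xp_free(xskb);
 */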

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

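/* Worked example (assuming 4 KiB pages): addr = 0x0ff8 and len = 16 gives
 * (0xff8 & 0xfff) + 16 = 0x1008 > 0x1000, so the descriptor crosses a page
 * boundary. On the DMA/ZC path the answer then depends on whether the
 * XSK_NEXT_PG_CONTIG bit was set for that page when the umem was mapped; on
 * the skb path it only matters whether the descriptor runs past the end of
 * the umem.
 */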
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

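/* In unaligned chunk mode, the 64-bit descriptor address carries the chunk
 * offset in its upper 16 bits (XSK_UNALIGNED_BUF_OFFSET_SHIFT is 48 in
 * <linux/if_xdp.h>) and the base address in the lower 48 bits. Worked
 * example: addr = (0x100ULL << 48) | 0x4000 decodes to base 0x4000,
 * offset 0x100, absolute address 0x4100.
 */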
static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}

#endif /* XSK_BUFF_POOL_H_ */