// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

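/* IDA handing out a unique id for every registered umem. */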
static DEFINE_IDA(umem_ida);

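/* Unpin the pages backing the umem and free the page array. The pages
 * are marked dirty, since the kernel may have written packet data into
 * them while they were pinned.
 */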
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kvfree(umem->pgs);
	umem->pgs = NULL;
}

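/* Undo the RLIMIT_MEMLOCK accounting taken in xdp_umem_account_pages(). */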
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

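/* Tear down the kernel virtual mapping set up by xdp_umem_addr_map(). */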
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

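/* Map the pinned pages into one contiguous kernel virtual address
 * range, so the umem can be addressed linearly from the kernel.
 */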
static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

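/* Full teardown: return the id, drop the kernel mapping, unpin the
 * pages and release the memlock accounting. Called once the last
 * reference to the umem is gone.
 */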
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_free(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

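/* Work callback used when the release has to be deferred to process
 * context, see xdp_put_umem().
 */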
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

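/* Take a reference on the umem. */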
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

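/* Drop a reference on the umem. The final reference triggers the
 * teardown, either inline or pushed to a workqueue for callers that
 * cannot perform the (sleeping) release themselves.
 */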
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

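/* Pin the user memory long-term (FOLL_LONGTERM). On a partial pin the
 * pages that were pinned are unpinned again and -ENOMEM is returned.
 */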
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0]);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kvfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

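/* Charge the pinned pages against the user's RLIMIT_MEMLOCK, unless
 * the task has CAP_IPC_LOCK. The cmpxchg loop makes the limit check
 * and the addition atomic against concurrent registrations by the
 * same user.
 */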
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

#define XDP_UMEM_FLAGS_VALID ( \
		XDP_UMEM_UNALIGNED_CHUNK_FLAG | \
		XDP_UMEM_TX_SW_CSUM | \
		0)

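/* Validate a registration request from user space and set up the
 * umem: sanity check sizes, alignment and flags, account and pin the
 * user pages, and map them into the kernel.
 */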
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	u64 addr = mr->addr, size = mr->len;
	u32 chunks_rem, npgs_rem;
	u64 chunks, npgs;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_FLAGS_VALID)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* The memory area has to be page size aligned. This is
		 * kept for simplicity and might change in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = div_u64_rem(size, chunk_size, &chunks_rem);
	if (!chunks || chunks > U32_MAX)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	umem->tx_metadata_len = mr->tx_metadata_len;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

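/* Allocate a umem, assign it a unique id and register the user memory
 * described by @mr. Returns the umem on success or an ERR_PTR() on
 * failure.
 */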
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_alloc(&umem_ida, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_free(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}
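
/* Usage sketch (not part of the kernel sources): a minimal user-space
 * example of how a registration request reaches xdp_umem_reg() above,
 * via the XDP_UMEM_REG setsockopt on an AF_XDP socket. The buffer and
 * chunk sizes below are illustrative, a 4 KiB page size is assumed,
 * and error handling is elided; this is a sketch of the registration
 * step only, not a complete AF_XDP setup (fill/completion rings and
 * the bind step are omitted).
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

int main(void)
{
	struct xdp_umem_reg mr;
	void *buf;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);

	/* 64 chunks of 2048 bytes; the area must be page aligned,
	 * as enforced by xdp_umem_reg().
	 */
	posix_memalign(&buf, 4096, 64 * 2048);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (unsigned long long)(uintptr_t)buf;
	mr.len = 64 * 2048;
	mr.chunk_size = 2048;	/* >= XDP_UMEM_MIN_CHUNK_SIZE */
	mr.headroom = 0;

	return setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}
#endif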