/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

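/* Cache a (tag, hash value) pair. The allocation is GFP_ATOMIC since the
 * caller may not sleep, and a failed allocation is harmless because the
 * cache is only an optimisation. Duplicate tags are dropped, and once the
 * cache grows beyond xenvif_hash_cache_size the entry with the lowest
 * sequence number (i.e. the least recently used) is evicted.
 */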
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
                            unsigned int len, u32 val)
{
        struct xenvif_hash_cache_entry *new, *entry, *oldest;
        unsigned long flags;
        bool found;

        new = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!new)
                return;

        memcpy(new->tag, tag, len);
        new->len = len;
        new->val = val;

        spin_lock_irqsave(&vif->hash.cache.lock, flags);

        found = false;
        oldest = NULL;
        list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                                lockdep_is_held(&vif->hash.cache.lock)) {
                /* Make sure we don't add duplicate entries */
                if (entry->len == len &&
                    memcmp(entry->tag, tag, len) == 0)
                        found = true;
                if (!oldest || entry->seq < oldest->seq)
                        oldest = entry;
        }

        if (!found) {
                new->seq = atomic_inc_return(&vif->hash.cache.seq);
                list_add_rcu(&new->link, &vif->hash.cache.list);

                if (++vif->hash.cache.count > xenvif_hash_cache_size) {
                        list_del_rcu(&oldest->link);
                        vif->hash.cache.count--;
                        kfree_rcu(oldest, rcu);
                }
        }

        spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

        if (found)
                kfree(new);
}

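/* Compute the Toeplitz hash of @data using the frontend-supplied key,
 * caching the result when the cache is enabled.
 */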
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
                           unsigned int len)
{
        u32 val;

        val = xen_netif_toeplitz_hash(vif->hash.key,
                                      sizeof(vif->hash.key),
                                      data, len);

        if (xenvif_hash_cache_size != 0)
                xenvif_add_hash(vif, data, len, val);

        return val;
}

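/* Empty the cache, e.g. because the hash key has changed. Entries are
 * unlinked under the lock but freed via kfree_rcu() so that lockless
 * readers in xenvif_find_hash() remain safe.
 */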
static void xenvif_flush_hash(struct xenvif *vif)
{
        struct xenvif_hash_cache_entry *entry, *n;
        unsigned long flags;

        if (xenvif_hash_cache_size == 0)
                return;

        spin_lock_irqsave(&vif->hash.cache.lock, flags);

        list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
                list_del_rcu(&entry->link);
                vif->hash.cache.count--;
                kfree_rcu(entry, rcu);
        }

        spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

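/* Look up a previously computed hash for @data, computing and caching a
 * fresh value on a miss. A hit bumps the entry's sequence number to keep
 * it from being evicted. Tags too large for an entry's buffer cannot be
 * cached and simply hash to zero.
 */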
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
                            unsigned int len)
{
        struct xenvif_hash_cache_entry *entry;
        u32 val;
        bool found;

        if (len >= XEN_NETBK_HASH_TAG_SIZE)
                return 0;

        if (xenvif_hash_cache_size == 0)
                return xenvif_new_hash(vif, data, len);

        rcu_read_lock();

        found = false;

        list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
                if (entry->len == len &&
                    memcmp(entry->tag, data, len) == 0) {
                        val = entry->val;
                        entry->seq = atomic_inc_return(&vif->hash.cache.seq);
                        found = true;
                        break;
                }
        }

        rcu_read_unlock();

        if (!found)
                val = xenvif_new_hash(vif, data, len);

        return val;
}

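/* Set skb->hash the way the frontend expects it: an L4 hash (Toeplitz
 * over addresses plus TCP ports) when an unfragmented TCP header is
 * present and the corresponding hash type is enabled, otherwise an L3
 * hash (addresses only), otherwise no hash at all.
 */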
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
        struct flow_keys flow;
        u32 hash = 0;
        enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
        u32 flags = vif->hash.flags;
        bool has_tcp_hdr;

        /* Quick rejection test: If the network protocol doesn't
         * correspond to any enabled hash type then there's no point
         * in parsing the packet header.
         */
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                             XEN_NETIF_CTRL_HASH_TYPE_IPV4))
                        break;

                goto done;

        case htons(ETH_P_IPV6):
                if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
                             XEN_NETIF_CTRL_HASH_TYPE_IPV6))
                        break;

                goto done;

        default:
                goto done;
        }

        memset(&flow, 0, sizeof(flow));
        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                goto done;

        has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
                      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (has_tcp_hdr &&
                    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
                        u8 data[12];

                        memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
                        memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
                        memcpy(&data[8], &flow.ports.src, 2);
                        memcpy(&data[10], &flow.ports.dst, 2);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L4;
                } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
                        u8 data[8];

                        memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
                        memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L3;
                }

                break;

        case htons(ETH_P_IPV6):
                if (has_tcp_hdr &&
                    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
                        u8 data[36];

                        memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
                        memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
                        memcpy(&data[32], &flow.ports.src, 2);
                        memcpy(&data[34], &flow.ports.dst, 2);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L4;
                } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
                        u8 data[32];

                        memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
                        memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L3;
                }

                break;
        }

done:
        if (type == PKT_HASH_TYPE_NONE)
                skb_clear_hash(skb);
        else
                __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

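/* Control-ring handler: select the hash algorithm. Only NONE and
 * TOEPLITZ are recognised.
 */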
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
        switch (alg) {
        case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
        case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
                break;

        default:
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        vif->hash.alg = alg;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

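/* Control-ring handler: report which hash types the backend supports,
 * provided an algorithm has been selected.
 */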
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

        *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

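/* Control-ring handler: enable a set of hash types. Fails if an unknown
 * type is requested or no algorithm has been selected yet.
 */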
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
        if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        vif->hash.flags = flags;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

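/* Control-ring handler: grant-copy a new Toeplitz key from the frontend.
 * Any trailing key octets beyond @len are zeroed, and the hash cache is
 * flushed since its values were computed with the old key.
 */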
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
        u8 *key = vif->hash.key;
        struct gnttab_copy copy_op = {
                .source.u.ref = gref,
                .source.domid = vif->domid,
                .dest.u.gmfn = virt_to_gfn(key),
                .dest.domid = DOMID_SELF,
                .dest.offset = xen_offset_in_page(key),
                .len = len,
                .flags = GNTCOPY_source_gref
        };

        if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        if (copy_op.len != 0) {
                gnttab_batch_copy(&copy_op, 1);

                if (copy_op.status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        /* Clear any remaining key octets */
        if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
                memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

        xenvif_flush_hash(vif);

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

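/* Control-ring handler: set the number of entries in the hash-to-queue
 * mapping table, capped at XEN_NETBK_MAX_HASH_MAPPING_SIZE. The active
 * copy of the table is zeroed so that every hash initially maps to
 * queue 0.
 */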
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
        if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        vif->hash.size = size;
        memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
               sizeof(u32) * size);

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

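/* Control-ring handler: grant-copy @len mapping entries at offset @off
 * into the inactive copy of the table, validate that every entry names
 * an existing queue, then flip mapping_sel so readers atomically switch
 * to the new table. A second copy op is needed when the destination
 * range straddles a page boundary.
 */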
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
{
        u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
        unsigned int nr = 1;
        struct gnttab_copy copy_op[2] = {{
                .source.u.ref = gref,
                .source.domid = vif->domid,
                .dest.domid = DOMID_SELF,
                .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
        }};

        if ((off + len < off) || (off + len > vif->hash.size) ||
            len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
        copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
        if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
                copy_op[1] = copy_op[0];
                copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
                copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
                copy_op[1].dest.offset = 0;
                copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
                copy_op[0].len = copy_op[1].source.offset;
                nr = 2;
        }

        memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
               vif->hash.size * sizeof(*mapping));

        if (copy_op[0].len != 0) {
                gnttab_batch_copy(copy_op, nr);

                if (copy_op[0].status != GNTST_okay ||
                    copy_op[nr - 1].status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        while (len-- != 0)
                if (mapping[off++] >= vif->num_queues)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        vif->hash.mapping_sel = !vif->hash.mapping_sel;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

#ifdef CONFIG_DEBUG_FS
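/* Dump the current hash algorithm, flags, key and mapping via debugfs. */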
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
        unsigned int i;

        switch (vif->hash.alg) {
        case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
                seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
                break;

        case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
                seq_puts(m, "Hash Algorithm: NONE\n");
                fallthrough;
        default:
                return;
        }

        if (vif->hash.flags) {
                seq_puts(m, "\nHash Flags:\n");

                if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
                        seq_puts(m, "- IPv4\n");
                if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
                        seq_puts(m, "- IPv4 + TCP\n");
                if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
                        seq_puts(m, "- IPv6\n");
                if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
                        seq_puts(m, "- IPv6 + TCP\n");
        }

        seq_puts(m, "\nHash Key:\n");

        for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
                unsigned int j, n;

                n = 8;
                if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
                        n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

                seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

                for (j = 0; j < n; j++, i++)
                        seq_printf(m, "%02x ", vif->hash.key[i]);

                seq_puts(m, "\n");
        }

        if (vif->hash.size != 0) {
                const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

                seq_puts(m, "\nHash Mapping:\n");

                for (i = 0; i < vif->hash.size; ) {
                        unsigned int j, n;

                        n = 8;
                        if (i + n >= vif->hash.size)
                                n = vif->hash.size - i;

                        seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

                        for (j = 0; j < n; j++, i++)
                                seq_printf(m, "%4u ", mapping[i]);

                        seq_puts(m, "\n");
                }
        }
}
#endif /* CONFIG_DEBUG_FS */

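/* Initialise the hash cache when a VIF comes up. No-op when caching is
 * disabled via the xenvif_hash_cache_size module parameter.
 */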
void xenvif_init_hash(struct xenvif *vif)
{
        if (xenvif_hash_cache_size == 0)
                return;

        BUG_ON(vif->hash.cache.count);

        spin_lock_init(&vif->hash.cache.lock);
        INIT_LIST_HEAD(&vif->hash.cache.list);
}

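/* Tear-down counterpart of xenvif_init_hash(): drop any cached entries. */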
void xenvif_deinit_hash(struct xenvif *vif)
{
        xenvif_flush_hash(vif);
}