/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

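/* Add a freshly computed hash to the per-vif cache. The entry is
 * allocated outside the lock with GFP_ATOMIC, since this can be
 * reached from packet-processing context, and is discarded again if a
 * racing caller has already cached the same tag. Readers traverse the
 * list under RCU only (see xenvif_find_hash()), hence the _rcu list
 * operations and kfree_rcu() when the oldest entry (lowest sequence
 * number) is evicted to keep the cache at xenvif_hash_cache_size
 * entries.
 */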
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
				lockdep_is_held(&vif->hash.cache.lock)) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			kfree_rcu(oldest, rcu);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}

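/* Compute the Toeplitz hash of @data with the current key and, unless
 * caching is disabled (xenvif_hash_cache_size is a module parameter
 * defined elsewhere in the driver), remember the result.
 */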
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}

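/* Drop every cached entry, e.g. after a new key is installed in
 * xenvif_set_hash_key() and the cached values have become stale.
 */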
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry, *n;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		kfree_rcu(entry, rcu);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

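/* Return the hash of a flow tag, using a cached value when one is
 * present. The tag is the concatenation of the flow's addresses and,
 * for TCP, its ports, as assembled by xenvif_set_skb_hash() below;
 * for example the IPv4 + TCP tag is laid out as:
 *
 *   data[0..3]   source address (network byte order)
 *   data[4..7]   destination address
 *   data[8..9]   source port
 *   data[10..11] destination port
 *
 * A tag at or above XEN_NETBK_HASH_TAG_SIZE bytes is rejected up
 * front with a hash of 0, so xenvif_add_hash() can never overrun
 * entry->tag.
 */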
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}

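/* Compute and set RSS-style hash metadata on an skb bound for the
 * frontend. The flow is dissected and, if the protocol combination
 * matches one of the enabled hash types, the Toeplitz hash of the
 * relevant address/port tuple is looked up or computed and recorded
 * in the skb together with its L3/L4 type. TCP fragments are hashed
 * at L3 only, since non-initial fragments carry no port numbers.
 */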
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

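/* The xenvif_set/get_hash_* handlers below are presumably invoked
 * from the driver's control-ring dispatch code in response to the
 * corresponding XEN_NETIF_CTRL_TYPE_* requests from the frontend;
 * each returns a XEN_NETIF_CTRL_STATUS_* code for the response.
 *
 * Select the hash algorithm. Only NONE (no hashing) and TOEPLITZ are
 * recognised; anything else is rejected as an invalid parameter.
 */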
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

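/* Report the hash types the backend can offer. This is a capability
 * query, so all four supported types are advertised regardless of
 * which are currently enabled, but only once an algorithm has been
 * selected.
 */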
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

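/* Enable a set of hash types. Unknown flag bits, or any request made
 * while the algorithm is still NONE, are rejected.
 */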
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

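/* Install a new Toeplitz key supplied by the frontend via a grant
 * copy (GNTCOPY_source_gref). An over-long key is rejected, a short
 * one is zero-padded, and the hash cache is flushed because its
 * contents were computed with the old key.
 */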
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

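/* Set the size of the hash-to-queue mapping table, bounded by
 * XEN_NETBK_MAX_HASH_MAPPING_SIZE, and zero the currently selected
 * mapping so that stale queue numbers cannot leak through.
 */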
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
	       sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

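/* Update the slice [off, off + len) of the hash-to-queue mapping.
 * The update is staged in the inactive half of the double-buffered
 * table: the active table is first copied across, the granted data is
 * then copied over the slice (as two operations when the destination
 * straddles a XEN_PAGE_SIZE boundary), and every updated entry is
 * checked against vif->num_queues before the selector is flipped.
 * Any failure leaves the active mapping untouched.
 */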
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
	unsigned int nr = 1;
	struct gnttab_copy copy_op[2] = {{
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.domid = DOMID_SELF,
		.len = len * sizeof(*mapping),
		.flags = GNTCOPY_source_gref
	}};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    len > XEN_PAGE_SIZE / sizeof(*mapping))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
	copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
	if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
		copy_op[1] = copy_op[0];
		copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
		copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
		copy_op[1].dest.offset = 0;
		copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
		copy_op[0].len = copy_op[1].source.offset;
		nr = 2;
	}

	memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
	       vif->hash.size * sizeof(*mapping));

	if (copy_op[0].len != 0) {
		gnttab_batch_copy(copy_op, nr);

		if (copy_op[0].status != GNTST_okay ||
		    copy_op[nr - 1].status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	while (len-- != 0)
		if (mapping[off++] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.mapping_sel = !vif->hash.mapping_sel;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

#ifdef CONFIG_DEBUG_FS
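/* Dump the negotiated hash state (algorithm, flags, key and queue
 * mapping) to a seq_file; the debugfs node that calls this is
 * presumably registered elsewhere in the driver. With the algorithm
 * unset there is nothing to report, and with NONE only the algorithm
 * line itself is printed, hence the fallthrough into the early
 * return.
 */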
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		fallthrough;
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */

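/* Initialise the hash-cache lock and list. A non-zero entry count at
 * this point would mean entries leaked from a previous lifetime of
 * the vif, hence the BUG_ON.
 */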
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	BUG_ON(vif->hash.cache.count);

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

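/* Tear-down counterpart of xenvif_init_hash(): just drop any cached
 * entries.
 */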
void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}