// SPDX-License-Identifier: GPL-2.0
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"

/* enic_addfltr_5t - Add an IPv4 5-tuple filter
 *	@enic: enic struct of vnic
 *	@keys: flow_keys of the IPv4 5-tuple
 *	@rq: rq number to steer to
 *
 * Returns the filter_id (hardware id) of the filter added. In case of
 * error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
	data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
	data.u.ipv4.src_port = ntohs(keys->ports.src);
	data.u.ipv4.dst_port = ntohs(keys->ports.dst);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete a clsf filter
 *	@enic: enic struct of vnic
 *	@filter_id: filter_id (hardware id) of the filter to be deleted
 *
 * Returns zero in case of success, a negative number in case of error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

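/* Usage sketch (illustrative, not part of the original file): a hypothetical
 * caller pairs the two helpers above, keeping the hardware filter id that
 * enic_addfltr_5t() returns so the filter can be torn down later. The RQ
 * number 3 and the function name are made up for the example; only
 * enic_addfltr_5t() and enic_delfltr() come from this file.
 */
#if 0	/* example only, not compiled with the driver */
static int example_steer_and_remove(struct enic *enic, struct flow_keys *keys)
{
	int fltr_id;

	/* returns the hardware filter id on success, or a negative errno
	 * (e.g. -EPROTONOSUPPORT for anything other than TCP/UDP)
	 */
	fltr_id = enic_addfltr_5t(enic, keys, 3);
	if (fltr_id < 0)
		return fltr_id;

	/* ... traffic for this 5-tuple is now steered to RQ 3 ... */

	return enic_delfltr(enic, fltr_id);	/* 0 on success */
}
#endif
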
/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 *	@enic: enic data
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
}

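/* Note (editorial, not in the original source): the loop bound above,
 * "i <= ENIC_RFS_FLW_MASK", and the bound used in enic_rfs_flw_tbl_free()
 * below, "i < (1 << ENIC_RFS_FLW_BITSHIFT)", walk the same set of buckets,
 * assuming the driver's header defines the mask in the usual way:
 *
 *	#define ENIC_RFS_FLW_BITSHIFT	(5)
 *	#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
 *
 * i.e. both visit every hlist head in rfs_h.ht_head[].
 */
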
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	enic_rfs_timer_stop(enic);
	spin_lock_bh(&enic->rfs_h.lock);
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
			enic->rfs_h.free++;
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
}

struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}

#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

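/* Sketch (assumed wiring, not shown in this file): enic_flow_may_expire()
 * is a timer callback, as the from_timer() call above implies. The driver
 * is expected to arm it roughly like this when the interface comes up,
 * with enic_rfs_timer_stop() deleting it on teardown:
 */
#if 0	/* example only, not compiled with the driver */
timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ / 4);
#endif
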
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
		    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
		    tpos->keys.ports.ports == k->ports.ports &&
		    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
		    tpos->keys.basic.n_proto == k->basic.n_proto)
			return tpos;
	return NULL;
}

int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* The desired rq changed for the flow: we need to delete
		 * the old fltr and add a new one.
		 *
		 * The moment we delete the fltr, upcoming pkts are put in
		 * the default rq based on rss. Once we add the new filter,
		 * upcoming pkts are put in the desired queue. This could
		 * cause ooo pkts.
		 *
		 * Let's first try adding the new fltr and then delete the
		 * old one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del the old fltr first */
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				/* the old fltr is gone and the new one could
				 * not be added: unlink and free the stale
				 * node so it does not leak
				 */
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
		/* tbl has space: add the new fltr first, then del the old */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* Deleting the old fltr failed; put the old fltr
			 * back on the list so enic_flow_may_expire() can
			 * try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}

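/* Sketch (assumed, following the standard aRFS pattern): the signature of
 * enic_rx_flow_steer() matches the .ndo_rx_flow_steer hook, which the core
 * RFS code calls to request hardware steering of a flow. The wiring would
 * live in the driver's net_device_ops, roughly:
 */
#if 0	/* example only, not compiled with the driver */
static const struct net_device_ops enic_netdev_ops = {
	/* ... other ops ... */
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
};
#endif
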
#endif /* CONFIG_RFS_ACCEL */