Linux v6.13.7: drivers/net/ethernet/intel/ice/ice_arfs.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"
#include <net/rps.h>

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}
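
/*
 * Illustrative sketch, not part of this file: roughly how the Flow
 * Director side can consult the helper above before installing a
 * side-band ntuple rule that would conflict with aRFS. The function
 * name and call site here are hypothetical; only
 * ice_is_arfs_using_perfect_flow() itself is real.
 */
static int ice_fdir_check_arfs_conflict(struct ice_hw *hw,
					enum ice_fltr_ptype flow_type)
{
	/* refuse to program the rule while aRFS owns this flow type */
	if (ice_is_arfs_using_perfect_flow(hw, flow_type))
		return -EBUSY;

	return 0;
}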

/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and add the rules to HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be used
 * to determine whether or not an aRFS entry should be removed from the hardware
 * and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}
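
/*
 * Note on ice_arfs_is_flow_expired()'s final test:
 * time_in_range64(a + delta, a, now) holds once "now" has advanced at
 * least delta past "a", so the return statement above is equivalent to:
 *
 *	time_after_eq64(get_jiffies_64(),
 *			arfs_entry->time_activated +
 *			ICE_ARFS_TIME_DELTA_EXPIRATION)
 *
 * i.e. a UDP filter is considered stale 5 seconds after activation; the
 * timer appears intended to bound the HW lifetime of connectionless
 * flows that rps_may_expire_flow() alone cannot age out.
 */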

/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is expected to add/delete rules on the add_list/del_list respectively.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}
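
/*
 * Design note: ice_arfs_update_flow_rules() only moves entries between
 * lists, and it runs under arfs_lock. The ice_fdir_write_fltr() calls
 * that actually touch HW happen afterwards, in ice_sync_arfs_fltrs()
 * below, once the lock has been dropped, so slow filter programming
 * never executes inside the spinlock.
 */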

/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* Once we process aRFS for the PF VSI get out */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}
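
/*
 * Context sketch, an assumption about the caller's exact shape: the
 * driver's periodic service task in ice_main.c drives this function,
 * so filter adds/deletes are batched here instead of being programmed
 * from the hot ndo_rx_flow_steer() path:
 *
 *	static void ice_service_task(struct work_struct *work)
 *	{
 *		struct ice_pf *pf = container_of(work, struct ice_pf,
 *						 serv_task);
 *		...
 *		ice_sync_arfs_fltrs(pf);
 *		...
 *	}
 */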

/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * returns an aRFS entry on success and NULL on failure
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
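	/*
	 * The modulo below keeps fltr_id strictly below RPS_NO_FILTER,
	 * the sentinel the RPS core uses for "no filter installed", so
	 * a valid filter id can never alias that value.
	 */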
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}

/**
 * ice_arfs_is_perfect_flow_set - check if a perfect flow type is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function lets aRFS
 * check whether perfect (4-tuple) flow rules are currently enabled in Flow
 * Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where the application is running
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add/update an entry in the
 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
 * if the flow_id already exists in the hash table but the rxq_idx has changed,
 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW; else
 * if the entry is marked as ICE_ARFS_TODEL, delete it from the aRFS hash table.
 * If neither of the previous conditions is true, then add a new entry in the
 * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
 * added to HW.
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* failed to allocate memory for aRFS so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}
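
/*
 * Wiring sketch, abridged and partly assumed: the stack only reaches
 * ice_rx_flow_steer() through the ndo_rx_flow_steer hook, which the
 * driver exposes when the kernel is built with RFS acceleration:
 *
 *	#ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer = ice_rx_flow_steer,
 *	#endif
 *
 * The non-negative return value is the filter id that the core later
 * passes back into rps_may_expire_flow().
 */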

/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}

/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_free_cpu_rx_rmap - free the CPU reverse map that was set up
 * @vsi: the VSI to be forwarded to
 */
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}

/**
 * ice_set_cpu_rx_rmap - set up CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	ice_for_each_q_vector(vsi, i)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     vsi->q_vectors[i]->irq.virq)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}

/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}

/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}
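
/*
 * Usage note, an assumption based on the generic RFS/aRFS knobs in
 * Documentation/networking/scaling.rst rather than anything in this
 * file: with CONFIG_RFS_ACCEL built in, aRFS on an ice port is
 * typically enabled from userspace along these lines (the interface
 * name and table sizes are illustrative):
 *
 *	# ethtool -K eth0 ntuple on
 *	# echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *	# echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * with rps_flow_cnt repeated for each Rx queue. After that the stack
 * begins calling ice_rx_flow_steer() for steerable TCP/UDP flows.
 */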