// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/pkt_cls.h>
#include "tc.h"
#include "tc_bindings.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"

#define EFX_EFV_PF	NULL
/* Look up the representor information (efv) for a device.
 * May return NULL for the PF (us), or an error pointer for a device that
 * isn't supported as a TC offload endpoint
 */
static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
						struct net_device *dev)
{
	struct efx_rep *efv;

	if (!dev)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it us (the PF)? */
	if (dev == efx->net_dev)
		return EFX_EFV_PF;
	/* Is it an efx vfrep at all? */
	if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it ours?  We don't support TC rules that include another
	 * EF100's netdevices (not even on another port of the same NIC).
	 */
	efv = netdev_priv(dev);
	if (efv->parent != efx)
		return ERR_PTR(-EOPNOTSUPP);
	return efv;
}

/* Convert a driver-internal vport ID into an external device (wire or VF) */
static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
{
	u32 mport;

	if (IS_ERR(efv))
		return PTR_ERR(efv);
	if (!efv) /* device is PF (us) */
		efx_mae_mport_wire(efx, &mport);
	else /* device is repr */
		efx_mae_mport_mport(efx, efv->mport, &mport);
	return mport;
}

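/* Offloaded rules live in a driver hashtable keyed on the (unsigned long)
 * TC cookie, so the destroy and stats callbacks below can find a rule from
 * the flow_cls_offload they are handed.
 */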
static const struct rhashtable_params efx_tc_match_action_ht_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct efx_tc_flow_rule, cookie),
	.head_offset	= offsetof(struct efx_tc_flow_rule, linkage),
};

static void efx_tc_free_action_set(struct efx_nic *efx,
				   struct efx_tc_action_set *act, bool in_hw)
{
	/* Failure paths calling this on the 'running action' set in_hw=false,
	 * because if the alloc had succeeded we'd've put it in acts.list and
	 * not still have it in act.
	 */
	if (in_hw) {
		efx_mae_free_action_set(efx, act->fw_id);
		/* in_hw is true iff we are on an acts.list; make sure to
		 * remove ourselves from that list before we are freed.
		 */
		list_del(&act->list);
	}
	if (act->count)
		efx_tc_flower_put_counter_index(efx, act->count);
	kfree(act);
}

static void efx_tc_free_action_set_list(struct efx_nic *efx,
					struct efx_tc_action_set_list *acts,
					bool in_hw)
{
	struct efx_tc_action_set *act, *next;

	/* Failure paths set in_hw=false, because usually the acts didn't get
	 * to efx_mae_alloc_action_set_list(); if they did, the failure tree
	 * has a separate efx_mae_free_action_set_list() before calling us.
	 */
	if (in_hw)
		efx_mae_free_action_set_list(efx, acts);
	/* Any act that's on the list will be in_hw even if the list isn't */
	list_for_each_entry_safe(act, next, &acts->list, list)
		efx_tc_free_action_set(efx, act, true);
	/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
}

static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

static void efx_tc_flow_free(void *ptr, void *arg)
{
	struct efx_tc_flow_rule *rule = ptr;
	struct efx_nic *efx = arg;

	netif_err(efx, drv, efx->net_dev,
		  "tc rule %lx still present at teardown, removing\n",
		  rule->cookie);

	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);

	kfree(rule);
}

/* Boilerplate for the simple 'copy a field' cases */
#define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field)	\
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) {		\
	struct flow_match_##_type fm;					\
									\
	flow_rule_match_##_tcget(rule, &fm);				\
	match->value._field = fm.key->_tcfield;				\
	match->mask._field = fm.mask->_tcfield;				\
}
#define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field)	\
	_MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field)
#define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field)	\
	_MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field)
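
/* For example, MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto) expands
 * (modulo whitespace) to:
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic fm;
 *
 *		flow_rule_match_basic(rule, &fm);
 *		match->value.eth_proto = fm.key->n_proto;
 *		match->mask.eth_proto = fm.mask->n_proto;
 *	}
 */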

static int efx_tc_flower_parse_match(struct efx_nic *efx,
				     struct flow_rule *rule,
				     struct efx_tc_match *match,
				     struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	unsigned char ipv = 0;

	/* Owing to internal TC infelicities, the IPV6_ADDRS key might be set
	 * even on IPv4 filters; so rather than relying on dissector->used_keys
	 * we check the addr_type in the CONTROL key.  If we don't find it (or
	 * it's masked, which should never happen), we treat both IPV4_ADDRS
	 * and IPV6_ADDRS as absent.
	 */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_control(rule, &fm);
		if (IS_ALL_ONES(fm.mask->addr_type))
			switch (fm.key->addr_type) {
			case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
				ipv = 4;
				break;
			case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
				ipv = 6;
				break;
			default:
				break;
			}

		if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT;
			match->mask.ip_frag = true;
		}
		if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) {
			match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
			match->mask.ip_firstfrag = true;
		}
		if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
					       fm.mask->flags);
			return -EOPNOTSUPP;
		}
	}
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x",
				       dissector->used_keys);
		return -EOPNOTSUPP;
	}

	MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto);
	/* Make sure we're IP if any L3/L4 keys used. */
	if (!IS_ALL_ONES(match->mask.eth_proto) ||
	    !(match->value.eth_proto == htons(ETH_P_IP) ||
	      match->value.eth_proto == htons(ETH_P_IPV6)))
		if (dissector->used_keys &
		    (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
		     BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
		     BIT(FLOW_DISSECTOR_KEY_PORTS) |
		     BIT(FLOW_DISSECTOR_KEY_IP) |
		     BIT(FLOW_DISSECTOR_KEY_TCP))) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]",
					       dissector->used_keys);
			return -EINVAL;
		}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan fm;

		flow_rule_match_vlan(rule, &fm);
		if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
			match->value.vlan_proto[0] = fm.key->vlan_tpid;
			match->mask.vlan_proto[0] = fm.mask->vlan_tpid;
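			/* 802.1Q TCI layout is PCP (3 bits), DEI (1 bit),
			 * VID (12 bits); hence priority << 13.
			 */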
			match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 |
							       fm.key->vlan_id);
			match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 |
							      fm.mask->vlan_id);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan fm;

		flow_rule_match_cvlan(rule, &fm);
		if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
			match->value.vlan_proto[1] = fm.key->vlan_tpid;
			match->mask.vlan_proto[1] = fm.mask->vlan_tpid;
			match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 |
							       fm.key->vlan_id);
			match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 |
							      fm.mask->vlan_id);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs fm;

		flow_rule_match_eth_addrs(rule, &fm);
		ether_addr_copy(match->value.eth_saddr, fm.key->src);
		ether_addr_copy(match->value.eth_daddr, fm.key->dst);
		ether_addr_copy(match->mask.eth_saddr, fm.mask->src);
		ether_addr_copy(match->mask.eth_daddr, fm.mask->dst);
	}

	MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto);
	/* Make sure we're TCP/UDP if any L4 keys used. */
	if ((match->value.ip_proto != IPPROTO_UDP &&
	     match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto))
		if (dissector->used_keys &
		    (BIT(FLOW_DISSECTOR_KEY_PORTS) |
		     BIT(FLOW_DISSECTOR_KEY_TCP))) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp",
					       dissector->used_keys);
			return -EINVAL;
		}
	MAP_KEY_AND_MASK(IP, ip, tos, ip_tos);
	MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl);
	if (ipv == 4) {
		MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip);
		MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip);
	}
#ifdef CONFIG_IPV6
	else if (ipv == 6) {
		MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6);
		MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6);
	}
#endif
	MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport);
	MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport);
	MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags);

	return 0;
}
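
/* Illustrative example (not part of this file): a flower rule using only
 * keys and actions this driver handles might be installed with e.g.
 *	tc filter add dev $PF ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 \
 *		action mirred egress redirect dev $REP
 * where $PF and $REP are placeholder netdev names.
 */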

/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
enum efx_tc_action_order {
	EFX_TC_AO_COUNT,
	EFX_TC_AO_DELIVER
};
/* Determine whether we can add @new action without violating order */
static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
					  enum efx_tc_action_order new)
{
	switch (new) {
	case EFX_TC_AO_COUNT:
		if (act->count)
			return false;
		fallthrough;
	case EFX_TC_AO_DELIVER:
		return !act->deliver;
	default:
		/* Bad caller.  Whatever they wanted to do, say they can't. */
		WARN_ON_ONCE(1);
		return false;
	}
}
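
/* Note the switch above relies on fallthrough: a new action is OK only if
 * no action at its own or any later position in the ordering is already
 * present, which is why EFX_TC_AO_COUNT also ends up checking
 * act->deliver.
 */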

static int efx_tc_flower_replace(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc,
				 struct efx_rep *efv)
{
	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule = NULL, *old;
	struct efx_tc_action_set *act = NULL;
	const struct flow_action_entry *fa;
	struct efx_rep *from_efv, *to_efv;
	struct efx_tc_match match;
	s64 rc;
	int i;

	if (!tc_can_offload_extack(efx->net_dev, extack))
		return -EOPNOTSUPP;
	if (WARN_ON(!efx->tc))
		return -ENETDOWN;
	if (WARN_ON(!efx->tc->up))
		return -ENETDOWN;

	from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
	if (IS_ERR(from_efv)) {
		/* Might be a tunnel decap rule from an indirect block.
		 * Support for those not implemented yet.
		 */
		return -EOPNOTSUPP;
	}

	if (efv != from_efv) {
		/* can't happen */
		NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)",
				       netdev_name(net_dev), efv ? "non-" : "",
				       from_efv ? "non-" : "");
		return -EINVAL;
	}

	/* Parse match */
	memset(&match, 0, sizeof(match));
	rc = efx_tc_flower_external_mport(efx, from_efv);
	if (rc < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port");
		return rc;
	}
	match.value.ingress_port = rc;
	match.mask.ingress_port = ~0;
	rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
	if (rc)
		return rc;

	if (tc->common.chain_index) {
		NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
		return -EOPNOTSUPP;
	}
	match.mask.recirc_id = 0xff;

	rc = efx_mae_match_check_caps(efx, &match.mask, extack);
	if (rc)
		return rc;

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule)
		return -ENOMEM;
	INIT_LIST_HEAD(&rule->acts.list);
	rule->cookie = tc->cookie;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
						&rule->linkage,
						efx_tc_match_action_ht_params);
	if (old) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
		rc = -EEXIST;
		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
		goto release;
	}

	/* Parse actions */
	act = kzalloc(sizeof(*act), GFP_USER);
	if (!act) {
		rc = -ENOMEM;
		goto release;
	}

	flow_action_for_each(i, fa, &fr->action) {
		struct efx_tc_action_set save;

		if (!act) {
			/* more actions after a non-pipe action */
			NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
			rc = -EINVAL;
			goto release;
		}

		if ((fa->id == FLOW_ACTION_REDIRECT ||
		     fa->id == FLOW_ACTION_MIRRED ||
		     fa->id == FLOW_ACTION_DROP) && fa->hw_stats) {
			struct efx_tc_counter_index *ctr;

			/* Currently the only actions that want stats are
			 * mirred and gact (ok, shot, trap, goto-chain), which
			 * means we want stats just before delivery.  Also,
			 * note that tunnel_key set shouldn't change the length
			 * — it's only the subsequent mirred that does that,
			 * and the stats are taken _before_ the mirred action
			 * happens.
			 */
			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
				/* All supported actions that count either steal
				 * (gact shot, mirred redirect) or clone act
				 * (mirred mirror), so we should never get two
				 * count actions on one action_set.
				 */
				NL_SET_ERR_MSG_MOD(extack, "Count-action conflict (can't happen)");
				rc = -EOPNOTSUPP;
				goto release;
			}

			if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
				NL_SET_ERR_MSG_FMT_MOD(extack, "hw_stats_type %u not supported (only 'delayed')",
						       fa->hw_stats);
				rc = -EOPNOTSUPP;
				goto release;
			}

			ctr = efx_tc_flower_get_counter_index(efx, tc->cookie,
							      EFX_TC_COUNTER_TYPE_AR);
			if (IS_ERR(ctr)) {
				rc = PTR_ERR(ctr);
				NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
				goto release;
			}
			act->count = ctr;
		}
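		/* The counter reference now belongs to @act; it is released
		 * by efx_tc_free_action_set() when the action set is freed.
		 */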

		switch (fa->id) {
		case FLOW_ACTION_DROP:
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL; /* end of the line */
			break;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			save = *act;

			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
				/* can't happen */
				rc = -EOPNOTSUPP;
				NL_SET_ERR_MSG_MOD(extack, "Deliver action violates action order (can't happen)");
				goto release;
			}

			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			if (IS_ERR(to_efv)) {
				NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch");
				rc = PTR_ERR(to_efv);
				goto release;
			}
			rc = efx_tc_flower_external_mport(efx, to_efv);
			if (rc < 0) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
				goto release;
			}
			act->dest_mport = rc;
			act->deliver = 1;
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL;
			if (fa->id == FLOW_ACTION_REDIRECT)
				break; /* end of the line */
			/* Mirror, so continue on with saved act */
			save.count = NULL;
			act = kzalloc(sizeof(*act), GFP_USER);
			if (!act) {
				rc = -ENOMEM;
				goto release;
			}
			*act = save;
			break;
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
					       fa->id);
			rc = -EOPNOTSUPP;
			goto release;
		}
	}

	if (act) {
		/* Not shot/redirected, so deliver to default dest */
		if (from_efv == EFX_EFV_PF)
			/* Rule applies to traffic from the wire,
			 * and default dest is thus the PF
			 */
			efx_mae_mport_uplink(efx, &act->dest_mport);
		else
			/* Representor, so rule applies to traffic from
			 * representee, and default dest is thus the rep.
			 * All reps use the same mport for delivery
			 */
			efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
					    &act->dest_mport);
		act->deliver = 1;
		rc = efx_mae_alloc_action_set(efx, act);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
			goto release;
		}
		list_add_tail(&act->list, &rule->acts.list);
		act = NULL; /* Prevent double-free in error path */
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed filter (cookie %lx)\n",
		  tc->cookie);

	rule->match = match;

	rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
		goto release;
	}
	rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
				 rule->acts.fw_id, &rule->fw_id);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
		goto release_acts;
	}
	return 0;

release_acts:
	efx_mae_free_action_set_list(efx, &rule->acts);
release:
	/* We failed to insert the rule, so free up any entries we created in
	 * subsidiary tables.
	 */
	if (act)
		efx_tc_free_action_set(efx, act, false);
	if (rule) {
		rhashtable_remove_fast(&efx->tc->match_action_ht,
				       &rule->linkage,
				       efx_tc_match_action_ht_params);
		efx_tc_free_action_set_list(efx, &rule->acts, false);
	}
	kfree(rule);
	return rc;
}

static int efx_tc_flower_destroy(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule;

	rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
				      efx_tc_match_action_ht_params);
	if (!rule) {
		/* Only log a message if we're the ingress device.  Otherwise
		 * it's a foreign filter and we might just not have been
		 * interested (e.g. we might not have been the egress device
		 * either).
		 */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			netif_warn(efx, drv, efx->net_dev,
				   "Filter %lx not found to remove\n", tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}

	/* Remove it from HW */
	efx_tc_delete_rule(efx, rule);
	/* Delete it from SW */
	rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
			       efx_tc_match_action_ht_params);
	netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
	kfree(rule);
	return 0;
}

static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
			       struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_counter_index *ctr;
	struct efx_tc_counter *cnt;
	u64 packets, bytes;

	ctr = efx_tc_flower_find_counter_index(efx, tc->cookie);
	if (!ctr) {
		/* See comment in efx_tc_flower_destroy() */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			if (net_ratelimit())
				netif_warn(efx, drv, efx->net_dev,
					   "Filter %lx not found for stats\n",
					   tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}
	if (WARN_ON(!ctr->cnt)) /* can't happen */
		return -EIO;
	cnt = ctr->cnt;

	spin_lock_bh(&cnt->lock);
	/* Report only new pkts/bytes since last time TC asked */
	packets = cnt->packets;
	bytes = cnt->bytes;
	flow_stats_update(&tc->stats, bytes - cnt->old_bytes,
			  packets - cnt->old_packets, 0, cnt->touched,
			  FLOW_ACTION_HW_STATS_DELAYED);
	cnt->old_packets = packets;
	cnt->old_bytes = bytes;
	spin_unlock_bh(&cnt->lock);
	return 0;
}

int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
		  struct flow_cls_offload *tc, struct efx_rep *efv)
{
	int rc;

	if (!efx->tc)
		return -EOPNOTSUPP;

	mutex_lock(&efx->tc->mutex);
	switch (tc->command) {
	case FLOW_CLS_REPLACE:
		rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
		break;
	case FLOW_CLS_DESTROY:
		rc = efx_tc_flower_destroy(efx, net_dev, tc);
		break;
	case FLOW_CLS_STATS:
		rc = efx_tc_flower_stats(efx, net_dev, tc);
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&efx->tc->mutex);
	return rc;
}

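/* Default rules provide the fallback datapath when no TC rule matches:
 * traffic from the PF is delivered to the wire, traffic from the wire to
 * the PF, and each representee's traffic to the shared representor m-port.
 */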
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
					 u32 eg_port, struct efx_tc_flow_rule *rule)
{
	struct efx_tc_action_set_list *acts = &rule->acts;
	struct efx_tc_match *match = &rule->match;
	struct efx_tc_action_set *act;
	int rc;

	match->value.ingress_port = ing_port;
	match->mask.ingress_port = ~0;
	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
				 acts->fw_id, &rule->fw_id);
	if (rc)
		goto fail3;
	return 0;
fail3:
	efx_mae_free_action_set_list(efx, acts);
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}

static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
	u32 ing_port, eg_port;

	efx_mae_mport_uplink(efx, &ing_port);
	efx_mae_mport_wire(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
	u32 ing_port, eg_port;

	efx_mae_mport_wire(efx, &ing_port);
	efx_mae_mport_uplink(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
	struct efx_tc_flow_rule *rule = &efv->dflt;
	struct efx_nic *efx = efv->parent;
	u32 ing_port, eg_port;

	efx_mae_mport_mport(efx, efv->mport, &ing_port);
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
				     struct efx_tc_flow_rule *rule)
{
	if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
		efx_tc_delete_rule(efx, rule);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
	u32 rep_mport_label;
	int rc;

	rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
		efx->tc->reps_mport_id, rep_mport_label);
	/* Use mport *selector* as vport ID */
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
			    &efx->tc->reps_mport_vport_id);
	return 0;
}

static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
	efx_mae_free_mport(efx, efx->tc->reps_mport_id);
	efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}

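/* Catch-all (unicast-default and multicast-default) RX filters on the
 * representors' vport, so that traffic delivered to the rep m-port is
 * received by the driver rather than dropped.
 */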
int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
	struct efx_filter_spec promisc, allmulti;
	int rc;

	if (efx->type->is_vf)
		return 0;
	if (!efx->tc)
		return 0;
	efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_uc_def(&promisc);
	efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &promisc, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_uc = rc;
	efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_mc_def(&allmulti);
	efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &allmulti, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_mc = rc;
	return 0;
}

void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
	if (efx->type->is_vf)
		return;
	if (!efx->tc)
		return;
	if (efx->tc->reps_filter_mc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
	efx->tc->reps_filter_mc = -1;
	if (efx->tc->reps_filter_uc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
	efx->tc->reps_filter_uc = -1;
}

int efx_init_tc(struct efx_nic *efx)
{
	int rc;

	rc = efx_mae_get_caps(efx, efx->tc->caps);
	if (rc)
		return rc;
	if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
		/* Firmware supports some match fields the driver doesn't know
		 * about.  Not fatal, unless any of those fields are required
		 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
		 */
		netif_warn(efx, probe, efx->net_dev,
			   "FW reports additional match fields %u\n",
			   efx->tc->caps->match_field_count);
	if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
		netif_err(efx, probe, efx->net_dev,
			  "Too few action prios supported (have %u, need %u)\n",
			  efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
		return -EIO;
	}
	rc = efx_tc_configure_default_rule_pf(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_default_rule_wire(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_rep_mport(efx);
	if (rc)
		return rc;
	efx->tc->up = true;
	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
	if (rc)
		return rc;
	return 0;
}

void efx_fini_tc(struct efx_nic *efx)
{
	/* We can get called even if efx_init_struct_tc() failed */
	if (!efx->tc)
		return;
	if (efx->tc->up)
		flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
	efx_tc_deconfigure_rep_mport(efx);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
	efx->tc->up = false;
}

int efx_init_struct_tc(struct efx_nic *efx)
{
	int rc;

	if (efx->type->is_vf)
		return 0;

	efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
	if (!efx->tc)
		return -ENOMEM;
	efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
	if (!efx->tc->caps) {
		rc = -ENOMEM;
		goto fail_alloc_caps;
	}
	INIT_LIST_HEAD(&efx->tc->block_list);

	mutex_init(&efx->tc->mutex);
	init_waitqueue_head(&efx->tc->flush_wq);
	rc = efx_tc_init_counters(efx);
	if (rc < 0)
		goto fail_counters;
	rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
	if (rc < 0)
		goto fail_match_action_ht;
	efx->tc->reps_filter_uc = -1;
	efx->tc->reps_filter_mc = -1;
	INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
	efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
	efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
	return 0;
fail_match_action_ht:
	efx_tc_destroy_counters(efx);
fail_counters:
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
fail_alloc_caps:
	kfree(efx->tc);
	efx->tc = NULL;
	return rc;
}

void efx_fini_struct_tc(struct efx_nic *efx)
{
	if (!efx->tc)
		return;

	mutex_lock(&efx->tc->mutex);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
				    efx);
	efx_tc_fini_counters(efx);
	mutex_unlock(&efx->tc->mutex);
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
	kfree(efx->tc);
	efx->tc = NULL;
}