Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2020 NXP
  3 */
  4#include "sja1105.h"
  5#include "sja1105_vl.h"
  6
  7struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
  8				       unsigned long cookie)
  9{
 10	struct sja1105_rule *rule;
 11
 12	list_for_each_entry(rule, &priv->flow_block.rules, list)
 13		if (rule->cookie == cookie)
 14			return rule;
 15
 16	return NULL;
 17}
 18
 19static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
 20{
 21	int i;
 22
 23	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
 24		if (!priv->flow_block.l2_policer_used[i])
 25			return i;
 26
 27	return -1;
 28}
 29
/* Install a broadcast policer for @port, bound to @cookie.
 *
 * A rule may be shared by several ports of the same flow block: if a rule
 * with this cookie already exists, the new port is attached to it via
 * port_mask; otherwise a fresh L2 policer index is allocated.
 *
 * Returns 0 on success, -ENOMEM/-ENOSPC/-EEXIST, or the error from the
 * static config reload. A rule allocated here is freed again on failure.
 */
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	struct dsa_switch *ds = priv->ds;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		/* May be -1 if no policer is free; checked just below so
		 * the failure path can also free the new allocation.
		 */
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* Broadcast policers live after the (num_ports * SJA1105_NUM_TC)
	 * per-port, per-TC entries. Each initially shares its own port's
	 * default policer (sharindx == port, see sja1105_flower_setup); a
	 * different sharindx means one was already installed on this port.
	 */
	if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

	/* NOTE(review): the RATE field appears to be programmed in hardware
	 * units of bytes/s * 512 / 10^6 — confirm granularity against the
	 * SJA1105 user manual (UM10944).
	 */
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit bookkeeping (policer marked used, rule linked) only after a
	 * successful reload; otherwise undo the allocation made above.
	 */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
101
/* Install a policer for traffic class @tc on @port, bound to @cookie.
 *
 * Mirrors sja1105_setup_bcast_policer(), but programs the per-port,
 * per-traffic-class policing entry (index port * SJA1105_NUM_TC + tc)
 * instead of the broadcast one. The rule may be shared by multiple ports
 * of the same flow block via port_mask.
 *
 * Returns 0 on success, -ENOMEM/-ENOSPC/-EEXIST, or the error from the
 * static config reload. A rule allocated here is freed again on failure.
 */
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		/* May be -1 if no policer is free; checked just below. */
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* Each per-TC entry initially shares its port's default policer
	 * (sharindx == port); anything else means a policer is already
	 * attached to this port/TC pair.
	 */
	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	/* NOTE(review): RATE unit looks like bytes/s * 512 / 10^6 — confirm
	 * against the SJA1105 user manual.
	 */
	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit bookkeeping only after a successful reload; otherwise undo
	 * the allocation made above.
	 */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
173
174static int sja1105_flower_policer(struct sja1105_private *priv, int port,
175				  struct netlink_ext_ack *extack,
176				  unsigned long cookie,
177				  struct sja1105_key *key,
178				  u64 rate_bytes_per_sec,
179				  u32 burst)
180{
181	switch (key->type) {
182	case SJA1105_KEY_BCAST:
183		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
184						   rate_bytes_per_sec, burst);
185	case SJA1105_KEY_TC:
186		return sja1105_setup_tc_policer(priv, extack, cookie, port,
187						key->tc.pcp, rate_bytes_per_sec,
188						burst);
189	default:
190		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
191		return -EOPNOTSUPP;
192	}
193}
194
/* Classify a flower match into one of the switch's native key types.
 *
 * The locals use sentinels meaning "not matched on": dmac == U64_MAX,
 * vid == U16_MAX, pcp == U16_MAX. After parsing, the combination of which
 * sentinels were overwritten decides the key type:
 *   - broadcast DMAC alone            -> SJA1105_KEY_BCAST
 *   - PCP alone                       -> SJA1105_KEY_TC
 *   - DMAC + VID + PCP                -> SJA1105_KEY_VLAN_AWARE_VL
 *   - DMAC (any other combination)   -> SJA1105_KEY_VLAN_UNAWARE_VL
 *
 * Only exact (full-mask) matches are accepted for DMAC, VID and PCP.
 * Returns 0 and fills @key on success, -EOPNOTSUPP otherwise.
 */
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	/* Reject any match on dissector keys the hardware cannot express. */
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	/* Matching on control flags (e.g. frag bits) is not offloadable. */
	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* Source MAC must be entirely unmatched (mask all-zero). */
		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		/* Destination MAC must be an exact match (mask all-ones). */
		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		/* VID and PCP: either unmatched or exact-matched. */
		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

	/* Classification ladder, most specific constraint set first. */
	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}
305
/* Validate that a FLOW_ACTION_POLICE entry describes a policer this
 * hardware can offload: drop on exceed, pipe/ok on conform (ok only as the
 * last action), byte-based rate only, and no peakrate/avrate/overhead.
 *
 * Returns 0 if offloadable, -EOPNOTSUPP with an extack message otherwise.
 */
static int sja1105_policer_validate(const struct flow_action *action,
				    const struct flow_action_entry *act,
				    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	/* "ok" terminates the action chain; it cannot be followed by more
	 * actions.
	 */
	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	/* Only byte/s policing is supported, not packet/s. */
	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}
345
/* Offload a tc-flower rule: parse its match into a native key, then apply
 * each action (police, trap, redirect, drop, gate).
 *
 * Virtual-link actions only record state into the static config; the
 * config is reloaded once at the end so that DESTPORTS is fully populated
 * before scheduling is committed.
 *
 * Returns 0 on success or a negative errno with an extack message.
 */
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_policer_validate(&rule->action, act, extack);
			if (rc)
				goto out;

			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			/* Trap is modeled as a redirect to the CPU port. */
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			/* Drop == redirect towards an empty port mask. */
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->hw_index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			/* A gate alone has no destination to schedule for. */
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}
461
/* Delete the offloaded flower rule identified by cls->cookie on @port.
 *
 * VL rules are torn down by the VL layer. Policer rules restore the
 * affected L2 policing entry's sharindx back to the port's default
 * (sharindx == port) and, once no port references the rule any more
 * (port_mask empty), release the shared policer and free the rule.
 *
 * Returns 0 (also when no such rule exists) or a negative errno.
 */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		/* Broadcast policer entry for this port. */
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		/* Per-port, per-TC policer entry. */
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		/* Last referencing port gone: return the policer to the
		 * free pool and drop the rule.
		 */
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
501
502int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
503			     struct flow_cls_offload *cls, bool ingress)
504{
505	struct sja1105_private *priv = ds->priv;
506	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
507	int rc;
508
509	if (!rule)
510		return 0;
511
512	if (rule->type != SJA1105_RULE_VL)
513		return 0;
514
515	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
516			      cls->common.extack);
517	if (rc)
518		return rc;
519
520	return 0;
521}
522
523void sja1105_flower_setup(struct dsa_switch *ds)
524{
525	struct sja1105_private *priv = ds->priv;
526	int port;
527
528	INIT_LIST_HEAD(&priv->flow_block.rules);
529
530	for (port = 0; port < ds->num_ports; port++)
531		priv->flow_block.l2_policer_used[port] = true;
532}
533
534void sja1105_flower_teardown(struct dsa_switch *ds)
535{
536	struct sja1105_private *priv = ds->priv;
537	struct sja1105_rule *rule;
538	struct list_head *pos, *n;
539
540	list_for_each_safe(pos, n, &priv->flow_block.rules) {
541		rule = list_entry(pos, struct sja1105_rule, list);
542		list_del(&rule->list);
543		kfree(rule);
544	}
545}
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2020 NXP
  3 */
  4#include "sja1105.h"
  5#include "sja1105_vl.h"
  6
/* Look up the flow rule previously installed under @cookie; NULL if none. */
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
				       unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}
 18
/* Return the first unused L2 policer index, or -1 if all are taken. */
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}
 29
/* Install a broadcast policer for @port, bound to @cookie. A rule may be
 * shared by several ports of the same flow block via port_mask; a fresh
 * L2 policer index is allocated only for a new rule.
 *
 * Returns 0 on success, -ENOMEM/-ENOSPC/-EEXIST, or the static config
 * reload error. A rule allocated here is freed again on failure.
 */
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	struct dsa_switch *ds = priv->ds;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		/* May be -1 if no policer is free; checked just below. */
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* Broadcast policers live after the num_ports * SJA1105_NUM_TC
	 * per-TC entries; sharindx != port means one is already installed.
	 */
	if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

	/* NOTE(review): RATE unit looks like bytes/s * 512 / 10^6 — confirm
	 * against the SJA1105 user manual.
	 */
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit bookkeeping only on a successful reload; otherwise undo
	 * the allocation made above.
	 */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
101
/* Install a policer for traffic class @tc on @port, bound to @cookie.
 * Mirrors sja1105_setup_bcast_policer() but programs the per-port, per-TC
 * policing entry (index port * SJA1105_NUM_TC + tc).
 *
 * Returns 0 on success, -ENOMEM/-ENOSPC/-EEXIST, or the static config
 * reload error. A rule allocated here is freed again on failure.
 */
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		/* May be -1 if no policer is free; checked just below. */
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* sharindx != port means this port/TC pair already has a policer. */
	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	/* NOTE(review): RATE unit looks like bytes/s * 512 / 10^6 — confirm
	 * against the SJA1105 user manual.
	 */
	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit bookkeeping only on a successful reload. */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
173
/* Dispatch a tc police action to the setup routine matching the parsed
 * key type; other key types cannot be policed by this hardware.
 */
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  u32 burst)
{
	switch (key->type) {
	case SJA1105_KEY_BCAST:
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);
	case SJA1105_KEY_TC:
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
		return -EOPNOTSUPP;
	}
}
194
/* Classify a flower match into one of the switch's native key types.
 *
 * Sentinels mean "not matched on": dmac == U64_MAX, vid/pcp == U16_MAX.
 * The surviving combination selects BCAST, TC, VLAN_AWARE_VL or
 * VLAN_UNAWARE_VL. Only exact (full-mask) matches are accepted.
 * Returns 0 and fills @key, or -EOPNOTSUPP.
 */
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	/* Reject any dissector keys the hardware cannot express. */
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* Source MAC must be entirely unmatched. */
		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		/* Destination MAC must be an exact match. */
		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		/* VID and PCP: either unmatched or exact-matched. */
		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

	/* Classification ladder, most specific constraint set first. */
	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}
302
/* Validate that a FLOW_ACTION_POLICE entry is offloadable: drop on exceed,
 * pipe/ok on conform (ok only as last action), byte-based rate only, no
 * peakrate/avrate/overhead. Returns 0 or -EOPNOTSUPP with extack message.
 */
static int sja1105_policer_validate(const struct flow_action *action,
				    const struct flow_action_entry *act,
				    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	/* "ok" terminates the chain; nothing may follow it. */
	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	/* Only byte/s policing is supported, not packet/s. */
	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}
342
/* Offload a tc-flower rule: parse its match into a native key, then apply
 * each action (police, trap, redirect, drop, gate). The static config is
 * reloaded once at the end so DESTPORTS is fully populated before
 * scheduling is committed. Returns 0 or a negative errno.
 */
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_policer_validate(&rule->action, act, extack);
			if (rc)
				goto out;

			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			/* Trap is modeled as a redirect to the CPU port. */
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			/* Drop == redirect towards an empty port mask. */
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->hw_index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			/* A gate alone has no destination to schedule for. */
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}
458
/* Delete the offloaded flower rule identified by cls->cookie on @port.
 * Policer rules restore the policing entry's sharindx to the port default
 * and, once port_mask is empty, release the shared policer and the rule.
 * Returns 0 (also when no such rule exists) or a negative errno.
 */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		/* Broadcast policer entry for this port. */
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		/* Per-port, per-TC policer entry. */
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		/* Last referencing port gone: free policer and rule. */
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
498
/* Report hardware statistics for the rule under cls->cookie. Only
 * virtual-link rules have counters; anything else succeeds with no stats.
 */
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	int rc;

	if (!rule)
		return 0;

	if (rule->type != SJA1105_RULE_VL)
		return 0;

	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
			      cls->common.extack);
	if (rc)
		return rc;

	return 0;
}
519
/* Initialize flow block state. Policers 0..num_ports-1 serve as each
 * port's default best-effort policer and are marked used up front so they
 * are never handed out to flower rules.
 */
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < ds->num_ports; port++)
		priv->flow_block.l2_policer_used[port] = true;
}
530
/* Free all remaining offloaded rules on switch teardown. The _safe
 * iterator is required because entries are unlinked and freed while
 * walking the list.
 */
void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}