// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"
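
/* Translate the key/mask pair of an offloaded flower rule into the
 * dpsw_acl_key match/mask representation used by the MC firmware. Keys
 * that the switch cannot match on are rejected with -EOPNOTSUPP.
 */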
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}
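
/* Install one ACL entry in hardware. The MC firmware expects the key to be
 * passed through a DMA-able buffer, so the entry is first serialized with
 * dpsw_acl_prepare_entry_cfg() into a temporary command buffer which stays
 * mapped only for the duration of the dpsw_acl_add_entry() command.
 */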
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
			 DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}
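
/* Add the new entry to the block's list of ACL entries, kept sorted by
 * ascending tc priority, and return the index at which it was inserted.
 */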
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}
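
/* Update the hardware precedence of an entry by removing it and re-adding
 * it with the new precedence value.
 */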
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}
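
/* Add a new filter to the ACL table: insert it into the sorted list, re-add
 * the higher priority entries that precede it with updated precedence values
 * so the relative filter order is preserved in hardware, then install the
 * new entry on the precedence slot left free.
 */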
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}
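
/* Remove a filter from the ACL table and from the block's list, then re-add
 * the entries that preceded it with updated precedence values so the
 * remaining filters keep their relative order in hardware.
 */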
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Remove it from the list as well */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}
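
/* Install a mirroring rule on all the switch ports that share this filter
 * block. The switch has a single mirror port, encoded as
 * ethsw->sw_attr.num_ifs while unset; any ports already configured are
 * unwound if one of the steps fails.
 */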
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Set up the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Set up the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}
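
/* Remove a mirroring rule from all the ports of the filter block and release
 * the mirror port when the last mirror filter is deleted.
 */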
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}
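
/* Mirroring is only offloaded per VLAN: accept a rule that matches exactly
 * one VLAN ID and nothing else, and return that VLAN ID through @vlan.
 */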
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}
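
/* tc matchall counterpart of dpaa2_switch_cls_flower_replace_acl(): offload
 * the matchall action as an ACL entry whose zeroed key/mask matches any
 * frame.
 */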
static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}
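
/* Replay all the mirror filters of the block on the given port, unwinding
 * the ones already installed if any step fails.
 */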
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}
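
/* tc matchall counterpart of dpaa2_switch_cls_flower_destroy(): the cookie
 * decides whether an ACL entry or a mirror entry is being deleted.
 */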
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}