// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

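/* Check that a police action only uses semantics the hardware can offload:
 * drop on exceed, pipe/accept on conform, no peakrate/avrate/overhead, and
 * byte-based (not packet-based) rates.
 */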
static int cxgb4_policer_validate(const struct flow_action *action,
				  const struct flow_action_entry *act,
				  struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

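/* Validate an egress matchall rule: exactly one police action, a rate that
 * fits within the link speed, and no queue already bound to a scheduling
 * class of a different level.
 */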
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			ret = cxgb4_policer_validate(actions, entry, extack);
			if (ret)
				return ret;

			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to a different class");
			return -EBUSY;
		}
	}

	return 0;
}

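/* Bind all of the port's queue sets to traffic class @tc, unwinding any
 * bindings already made if one of them fails.
 */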
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

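/* Allocate a channel rate-limiting (CH_RL) scheduling class for the police
 * action's rate and bind all of the port's queues to it.
 */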
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
	if (ret)
		return ret;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

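/* Unbind the port's queues and release the scheduling class allocated for
 * the egress matchall rule.
 */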
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

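/* If the rule contains a mirred action, allocate a mirror VI for the port
 * and remember its VIID so it can be released on teardown.
 */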
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}

static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}

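/* Install one LETCAM filter (IPv4 or IPv6, selected by @filter_type) that
 * matches all traffic arriving on this PF/VF and carries the rule's actions.
 */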
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}

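/* Set up the ingress matchall offload: allocate a mirror VI if needed and
 * install one filter per supported filter type, rolling back on failure.
 */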
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

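/* Tear down the ingress matchall offload: remove all installed filters,
 * release the mirror VI and reset the cached statistics.
 */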
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

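/* Offload a TC matchall rule. Only one ingress and one egress matchall rule
 * can be offloaded per port; ingress rules are installed as LETCAM filters,
 * egress rules become a rate-limiting traffic class.
 */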
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

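/* Remove a previously offloaded matchall rule, identified by its TC cookie. */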
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

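/* Report hit counters for the ingress matchall rule by summing the hardware
 * counters of each installed filter and pushing the delta to TC.
 */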
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

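/* Allocate the adapter's matchall state, with one entry per port. */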
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}