Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021-2023, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_base.h"
6#include "ice_lib.h"
7#include "ice_flow.h"
8#include "ice_vf_lib_private.h"
9
10#define to_fltr_conf_from_desc(p) \
11 container_of(p, struct virtchnl_fdir_fltr_conf, input)
12
13#define GTPU_TEID_OFFSET 4
14#define GTPU_EH_QFI_OFFSET 1
15#define GTPU_EH_QFI_MASK 0x3F
16#define PFCP_S_OFFSET 0
17#define PFCP_S_MASK 0x1
18#define PFCP_PORT_NR 8805
19
20#define FDIR_INSET_FLAG_ESP_S 0
21#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
22#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
23#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
24
25enum ice_fdir_tunnel_type {
26 ICE_FDIR_TUNNEL_TYPE_NONE = 0,
27 ICE_FDIR_TUNNEL_TYPE_GTPU,
28 ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
29};
30
31struct virtchnl_fdir_fltr_conf {
32 struct ice_fdir_fltr input;
33 enum ice_fdir_tunnel_type ttype;
34 u64 inset_flag;
35 u32 flow_id;
36};
37
38struct virtchnl_fdir_inset_map {
39 enum virtchnl_proto_hdr_field field;
40 enum ice_flow_field fld;
41 u64 flag;
42 u64 mask;
43};
44
45static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
46 {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
47 {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
48 {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
49 {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
50 {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
51 {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
52 {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
53 {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
54 {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
55 {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
56 {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
57 {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
58 {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
59 {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
60 {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
61 {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
62 {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
63 {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
64 {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
65 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
66 FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
67 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
68 FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
69 {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
70 {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
71 {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
72};
73
74/**
75 * ice_vc_fdir_param_check
76 * @vf: pointer to the VF structure
77 * @vsi_id: VF relative VSI ID
78 *
79 * Check for the valid VSI ID, PF's state and VF's state
80 *
81 * Return: 0 on success, and -EINVAL on error.
82 */
83static int
84ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
85{
86 struct ice_pf *pf = vf->pf;
87
88 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
89 return -EINVAL;
90
91 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
92 return -EINVAL;
93
94 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
95 return -EINVAL;
96
97 if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
98 return -EINVAL;
99
100 if (!ice_get_vf_vsi(vf))
101 return -EINVAL;
102
103 return 0;
104}
105
106/**
107 * ice_vf_start_ctrl_vsi
108 * @vf: pointer to the VF structure
109 *
110 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
111 *
112 * Return: 0 on success, and other on error.
113 */
114static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
115{
116 struct ice_pf *pf = vf->pf;
117 struct ice_vsi *ctrl_vsi;
118 struct device *dev;
119 int err;
120
121 dev = ice_pf_to_dev(pf);
122 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
123 return -EEXIST;
124
125 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
126 if (!ctrl_vsi) {
127 dev_dbg(dev, "Could not setup control VSI for VF %d\n",
128 vf->vf_id);
129 return -ENOMEM;
130 }
131
132 err = ice_vsi_open_ctrl(ctrl_vsi);
133 if (err) {
134 dev_dbg(dev, "Could not open control VSI for VF %d\n",
135 vf->vf_id);
136 goto err_vsi_open;
137 }
138
139 return 0;
140
141err_vsi_open:
142 ice_vsi_release(ctrl_vsi);
143 if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
144 pf->vsi[vf->ctrl_vsi_idx] = NULL;
145 vf->ctrl_vsi_idx = ICE_NO_VSI;
146 }
147 return err;
148}
149
150/**
151 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
152 * @vf: pointer to the VF structure
153 * @flow: filter flow type
154 *
155 * Return: 0 on success, and other on error.
156 */
157static int
158ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
159{
160 struct ice_vf_fdir *fdir = &vf->fdir;
161
162 if (!fdir->fdir_prof) {
163 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
164 ICE_FLTR_PTYPE_MAX,
165 sizeof(*fdir->fdir_prof),
166 GFP_KERNEL);
167 if (!fdir->fdir_prof)
168 return -ENOMEM;
169 }
170
171 if (!fdir->fdir_prof[flow]) {
172 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
173 sizeof(**fdir->fdir_prof),
174 GFP_KERNEL);
175 if (!fdir->fdir_prof[flow])
176 return -ENOMEM;
177 }
178
179 return 0;
180}
181
182/**
183 * ice_vc_fdir_free_prof - free profile for this filter flow type
184 * @vf: pointer to the VF structure
185 * @flow: filter flow type
186 */
187static void
188ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
189{
190 struct ice_vf_fdir *fdir = &vf->fdir;
191
192 if (!fdir->fdir_prof)
193 return;
194
195 if (!fdir->fdir_prof[flow])
196 return;
197
198 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
199 fdir->fdir_prof[flow] = NULL;
200}
201
202/**
203 * ice_vc_fdir_free_prof_all - free all the profile for this VF
204 * @vf: pointer to the VF structure
205 */
206static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
207{
208 struct ice_vf_fdir *fdir = &vf->fdir;
209 enum ice_fltr_ptype flow;
210
211 if (!fdir->fdir_prof)
212 return;
213
214 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
215 ice_vc_fdir_free_prof(vf, flow);
216
217 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
218 fdir->fdir_prof = NULL;
219}
220
221/**
222 * ice_vc_fdir_parse_flow_fld
223 * @proto_hdr: virtual channel protocol filter header
224 * @conf: FDIR configuration for each filter
225 * @fld: field type array
226 * @fld_cnt: field counter
227 *
228 * Parse the virtual channel filter header and store them into field type array
229 *
230 * Return: 0 on success, and other on error.
231 */
232static int
233ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
234 struct virtchnl_fdir_fltr_conf *conf,
235 enum ice_flow_field *fld, int *fld_cnt)
236{
237 struct virtchnl_proto_hdr hdr;
238 u32 i;
239
240 memcpy(&hdr, proto_hdr, sizeof(hdr));
241
242 for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
243 VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
244 if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
245 if (fdir_inset_map[i].mask &&
246 ((fdir_inset_map[i].mask & conf->inset_flag) !=
247 fdir_inset_map[i].flag))
248 continue;
249
250 fld[*fld_cnt] = fdir_inset_map[i].fld;
251 *fld_cnt += 1;
252 if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
253 return -EINVAL;
254 VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
255 fdir_inset_map[i].field);
256 }
257
258 return 0;
259}
260
261/**
262 * ice_vc_fdir_set_flow_fld
263 * @vf: pointer to the VF structure
264 * @fltr: virtual channel add cmd buffer
265 * @conf: FDIR configuration for each filter
266 * @seg: array of one or more packet segments that describe the flow
267 *
268 * Parse the virtual channel add msg buffer's field vector and store them into
269 * flow's packet segment field
270 *
271 * Return: 0 on success, and other on error.
272 */
273static int
274ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
275 struct virtchnl_fdir_fltr_conf *conf,
276 struct ice_flow_seg_info *seg)
277{
278 struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
279 enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
280 struct device *dev = ice_pf_to_dev(vf->pf);
281 struct virtchnl_proto_hdrs *proto;
282 int fld_cnt = 0;
283 int i;
284
285 proto = &rule->proto_hdrs;
286 for (i = 0; i < proto->count; i++) {
287 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
288 int ret;
289
290 ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
291 if (ret)
292 return ret;
293 }
294
295 if (fld_cnt == 0) {
296 dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
297 return -EINVAL;
298 }
299
300 for (i = 0; i < fld_cnt; i++)
301 ice_flow_set_fld(seg, fld[i],
302 ICE_FLOW_FLD_OFF_INVAL,
303 ICE_FLOW_FLD_OFF_INVAL,
304 ICE_FLOW_FLD_OFF_INVAL, false);
305
306 return 0;
307}
308
309/**
310 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
311 * @vf: pointer to the VF structure
312 * @conf: FDIR configuration for each filter
313 * @seg: array of one or more packet segments that describe the flow
314 *
315 * Return: 0 on success, and other on error.
316 */
317static int
318ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
319 struct virtchnl_fdir_fltr_conf *conf,
320 struct ice_flow_seg_info *seg)
321{
322 enum ice_fltr_ptype flow = conf->input.flow_type;
323 enum ice_fdir_tunnel_type ttype = conf->ttype;
324 struct device *dev = ice_pf_to_dev(vf->pf);
325
326 switch (flow) {
327 case ICE_FLTR_PTYPE_NON_IP_L2:
328 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
329 break;
330 case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
331 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
332 ICE_FLOW_SEG_HDR_IPV4 |
333 ICE_FLOW_SEG_HDR_IPV_OTHER);
334 break;
335 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
336 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
337 ICE_FLOW_SEG_HDR_IPV4 |
338 ICE_FLOW_SEG_HDR_IPV_OTHER);
339 break;
340 case ICE_FLTR_PTYPE_NONF_IPV4_AH:
341 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
342 ICE_FLOW_SEG_HDR_IPV4 |
343 ICE_FLOW_SEG_HDR_IPV_OTHER);
344 break;
345 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
346 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
347 ICE_FLOW_SEG_HDR_IPV4 |
348 ICE_FLOW_SEG_HDR_IPV_OTHER);
349 break;
350 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
351 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
352 ICE_FLOW_SEG_HDR_IPV4 |
353 ICE_FLOW_SEG_HDR_IPV_OTHER);
354 break;
355 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
356 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
357 ICE_FLOW_SEG_HDR_IPV4 |
358 ICE_FLOW_SEG_HDR_IPV_OTHER);
359 break;
360 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
361 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
362 ICE_FLOW_SEG_HDR_IPV_OTHER);
363 break;
364 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
365 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
366 ICE_FLOW_SEG_HDR_IPV4 |
367 ICE_FLOW_SEG_HDR_IPV_OTHER);
368 break;
369 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
370 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
371 ICE_FLOW_SEG_HDR_IPV4 |
372 ICE_FLOW_SEG_HDR_IPV_OTHER);
373 break;
374 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
375 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
376 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
377 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
378 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
379 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
380 ICE_FLOW_SEG_HDR_IPV4 |
381 ICE_FLOW_SEG_HDR_IPV_OTHER);
382 } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
383 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
384 ICE_FLOW_SEG_HDR_GTPU_IP |
385 ICE_FLOW_SEG_HDR_IPV4 |
386 ICE_FLOW_SEG_HDR_IPV_OTHER);
387 } else {
388 dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
389 flow, vf->vf_id);
390 return -EINVAL;
391 }
392 break;
393 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
394 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
395 ICE_FLOW_SEG_HDR_IPV4 |
396 ICE_FLOW_SEG_HDR_IPV_OTHER);
397 break;
398 case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
399 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
400 ICE_FLOW_SEG_HDR_IPV6 |
401 ICE_FLOW_SEG_HDR_IPV_OTHER);
402 break;
403 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
404 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
405 ICE_FLOW_SEG_HDR_IPV6 |
406 ICE_FLOW_SEG_HDR_IPV_OTHER);
407 break;
408 case ICE_FLTR_PTYPE_NONF_IPV6_AH:
409 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
410 ICE_FLOW_SEG_HDR_IPV6 |
411 ICE_FLOW_SEG_HDR_IPV_OTHER);
412 break;
413 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
414 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
415 ICE_FLOW_SEG_HDR_IPV6 |
416 ICE_FLOW_SEG_HDR_IPV_OTHER);
417 break;
418 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
419 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
420 ICE_FLOW_SEG_HDR_IPV6 |
421 ICE_FLOW_SEG_HDR_IPV_OTHER);
422 break;
423 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
424 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
425 ICE_FLOW_SEG_HDR_IPV6 |
426 ICE_FLOW_SEG_HDR_IPV_OTHER);
427 break;
428 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
429 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
430 ICE_FLOW_SEG_HDR_IPV_OTHER);
431 break;
432 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
433 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
434 ICE_FLOW_SEG_HDR_IPV6 |
435 ICE_FLOW_SEG_HDR_IPV_OTHER);
436 break;
437 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
438 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
439 ICE_FLOW_SEG_HDR_IPV6 |
440 ICE_FLOW_SEG_HDR_IPV_OTHER);
441 break;
442 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
443 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
444 ICE_FLOW_SEG_HDR_IPV6 |
445 ICE_FLOW_SEG_HDR_IPV_OTHER);
446 break;
447 default:
448 dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
449 flow, vf->vf_id);
450 return -EINVAL;
451 }
452
453 return 0;
454}
455
456/**
457 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
458 * @vf: pointer to the VF structure
459 * @flow: filter flow type
460 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
461 */
462static void
463ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
464{
465 struct ice_vf_fdir *fdir = &vf->fdir;
466 struct ice_fd_hw_prof *vf_prof;
467 struct ice_pf *pf = vf->pf;
468 struct ice_vsi *vf_vsi;
469 struct device *dev;
470 struct ice_hw *hw;
471 u64 prof_id;
472 int i;
473
474 dev = ice_pf_to_dev(pf);
475 hw = &pf->hw;
476 if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
477 return;
478
479 vf_prof = fdir->fdir_prof[flow];
480 prof_id = vf_prof->prof_id[tun];
481
482 vf_vsi = ice_get_vf_vsi(vf);
483 if (!vf_vsi) {
484 dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
485 return;
486 }
487
488 if (!fdir->prof_entry_cnt[flow][tun])
489 return;
490
491 for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
492 if (vf_prof->entry_h[i][tun]) {
493 u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
494
495 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
496 ice_flow_rem_entry(hw, ICE_BLK_FD,
497 vf_prof->entry_h[i][tun]);
498 vf_prof->entry_h[i][tun] = 0;
499 }
500
501 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
502 devm_kfree(dev, vf_prof->fdir_seg[tun]);
503 vf_prof->fdir_seg[tun] = NULL;
504
505 for (i = 0; i < vf_prof->cnt; i++)
506 vf_prof->vsi_h[i] = 0;
507
508 fdir->prof_entry_cnt[flow][tun] = 0;
509}
510
511/**
512 * ice_vc_fdir_rem_prof_all - remove profile for this VF
513 * @vf: pointer to the VF structure
514 */
515static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
516{
517 enum ice_fltr_ptype flow;
518
519 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
520 flow < ICE_FLTR_PTYPE_MAX; flow++) {
521 ice_vc_fdir_rem_prof(vf, flow, 0);
522 ice_vc_fdir_rem_prof(vf, flow, 1);
523 }
524}
525
526/**
527 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
528 * @fdir: pointer to the VF FDIR structure
529 */
530static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
531{
532 enum ice_fltr_ptype flow;
533
534 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
535 flow < ICE_FLTR_PTYPE_MAX; flow++) {
536 fdir->fdir_fltr_cnt[flow][0] = 0;
537 fdir->fdir_fltr_cnt[flow][1] = 0;
538 }
539}
540
541/**
542 * ice_vc_fdir_has_prof_conflict
543 * @vf: pointer to the VF structure
544 * @conf: FDIR configuration for each filter
545 *
546 * Check if @conf has conflicting profile with existing profiles
547 *
548 * Return: true on success, and false on error.
549 */
550static bool
551ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
552 struct virtchnl_fdir_fltr_conf *conf)
553{
554 struct ice_fdir_fltr *desc;
555
556 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
557 struct virtchnl_fdir_fltr_conf *existing_conf;
558 enum ice_fltr_ptype flow_type_a, flow_type_b;
559 struct ice_fdir_fltr *a, *b;
560
561 existing_conf = to_fltr_conf_from_desc(desc);
562 a = &existing_conf->input;
563 b = &conf->input;
564 flow_type_a = a->flow_type;
565 flow_type_b = b->flow_type;
566
567 /* No need to compare two rules with different tunnel types or
568 * with the same protocol type.
569 */
570 if (existing_conf->ttype != conf->ttype ||
571 flow_type_a == flow_type_b)
572 continue;
573
574 switch (flow_type_a) {
575 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
576 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
577 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
578 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
579 return true;
580 break;
581 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
582 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
583 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
584 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
585 return true;
586 break;
587 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
588 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
589 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
590 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
591 return true;
592 break;
593 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
594 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
595 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
596 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
597 return true;
598 break;
599 default:
600 break;
601 }
602 }
603
604 return false;
605}
606
607/**
608 * ice_vc_fdir_write_flow_prof
609 * @vf: pointer to the VF structure
610 * @flow: filter flow type
611 * @seg: array of one or more packet segments that describe the flow
612 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
613 *
614 * Write the flow's profile config and packet segment into the hardware
615 *
616 * Return: 0 on success, and other on error.
617 */
618static int
619ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
620 struct ice_flow_seg_info *seg, int tun)
621{
622 struct ice_vf_fdir *fdir = &vf->fdir;
623 struct ice_vsi *vf_vsi, *ctrl_vsi;
624 struct ice_flow_seg_info *old_seg;
625 struct ice_flow_prof *prof = NULL;
626 struct ice_fd_hw_prof *vf_prof;
627 struct device *dev;
628 struct ice_pf *pf;
629 struct ice_hw *hw;
630 u64 entry1_h = 0;
631 u64 entry2_h = 0;
632 int ret;
633
634 pf = vf->pf;
635 dev = ice_pf_to_dev(pf);
636 hw = &pf->hw;
637 vf_vsi = ice_get_vf_vsi(vf);
638 if (!vf_vsi)
639 return -EINVAL;
640
641 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
642 if (!ctrl_vsi)
643 return -EINVAL;
644
645 vf_prof = fdir->fdir_prof[flow];
646 old_seg = vf_prof->fdir_seg[tun];
647 if (old_seg) {
648 if (!memcmp(old_seg, seg, sizeof(*seg))) {
649 dev_dbg(dev, "Duplicated profile for VF %d!\n",
650 vf->vf_id);
651 return -EEXIST;
652 }
653
654 if (fdir->fdir_fltr_cnt[flow][tun]) {
655 ret = -EINVAL;
656 dev_dbg(dev, "Input set conflicts for VF %d\n",
657 vf->vf_id);
658 goto err_exit;
659 }
660
661 /* remove previously allocated profile */
662 ice_vc_fdir_rem_prof(vf, flow, tun);
663 }
664
665 ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
666 tun + 1, false, &prof);
667 if (ret) {
668 dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
669 flow, vf->vf_id);
670 goto err_exit;
671 }
672
673 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
674 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
675 seg, &entry1_h);
676 if (ret) {
677 dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
678 flow, vf->vf_id);
679 goto err_prof;
680 }
681
682 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
683 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
684 seg, &entry2_h);
685 if (ret) {
686 dev_dbg(dev,
687 "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
688 flow, vf->vf_id);
689 goto err_entry_1;
690 }
691
692 vf_prof->fdir_seg[tun] = seg;
693 vf_prof->cnt = 0;
694 fdir->prof_entry_cnt[flow][tun] = 0;
695
696 vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
697 vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
698 vf_prof->cnt++;
699 fdir->prof_entry_cnt[flow][tun]++;
700
701 vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
702 vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
703 vf_prof->cnt++;
704 fdir->prof_entry_cnt[flow][tun]++;
705
706 vf_prof->prof_id[tun] = prof->id;
707
708 return 0;
709
710err_entry_1:
711 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
712 ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
713 ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
714err_prof:
715 ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
716err_exit:
717 return ret;
718}
719
720/**
721 * ice_vc_fdir_config_input_set
722 * @vf: pointer to the VF structure
723 * @fltr: virtual channel add cmd buffer
724 * @conf: FDIR configuration for each filter
725 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
726 *
727 * Config the input set type and value for virtual channel add msg buffer
728 *
729 * Return: 0 on success, and other on error.
730 */
731static int
732ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
733 struct virtchnl_fdir_fltr_conf *conf, int tun)
734{
735 struct ice_fdir_fltr *input = &conf->input;
736 struct device *dev = ice_pf_to_dev(vf->pf);
737 struct ice_flow_seg_info *seg;
738 enum ice_fltr_ptype flow;
739 int ret;
740
741 ret = ice_vc_fdir_has_prof_conflict(vf, conf);
742 if (ret) {
743 dev_dbg(dev, "Found flow profile conflict for VF %d\n",
744 vf->vf_id);
745 return ret;
746 }
747
748 flow = input->flow_type;
749 ret = ice_vc_fdir_alloc_prof(vf, flow);
750 if (ret) {
751 dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
752 return ret;
753 }
754
755 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
756 if (!seg)
757 return -ENOMEM;
758
759 ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
760 if (ret) {
761 dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
762 goto err_exit;
763 }
764
765 ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
766 if (ret) {
767 dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
768 goto err_exit;
769 }
770
771 ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
772 if (ret == -EEXIST) {
773 devm_kfree(dev, seg);
774 } else if (ret) {
775 dev_dbg(dev, "Write flow profile for VF %d failed\n",
776 vf->vf_id);
777 goto err_exit;
778 }
779
780 return 0;
781
782err_exit:
783 devm_kfree(dev, seg);
784 return ret;
785}
786
787/**
788 * ice_vc_fdir_parse_pattern
789 * @vf: pointer to the VF info
790 * @fltr: virtual channel add cmd buffer
791 * @conf: FDIR configuration for each filter
792 *
793 * Parse the virtual channel filter's pattern and store them into conf
794 *
795 * Return: 0 on success, and other on error.
796 */
797static int
798ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
799 struct virtchnl_fdir_fltr_conf *conf)
800{
801 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
802 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
803 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
804 struct device *dev = ice_pf_to_dev(vf->pf);
805 struct ice_fdir_fltr *input = &conf->input;
806 int i;
807
808 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
809 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
810 proto->count, vf->vf_id);
811 return -EINVAL;
812 }
813
814 for (i = 0; i < proto->count; i++) {
815 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
816 struct ip_esp_hdr *esph;
817 struct ip_auth_hdr *ah;
818 struct sctphdr *sctph;
819 struct ipv6hdr *ip6h;
820 struct udphdr *udph;
821 struct tcphdr *tcph;
822 struct ethhdr *eth;
823 struct iphdr *iph;
824 u8 s_field;
825 u8 *rawh;
826
827 switch (hdr->type) {
828 case VIRTCHNL_PROTO_HDR_ETH:
829 eth = (struct ethhdr *)hdr->buffer;
830 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
831
832 if (hdr->field_selector)
833 input->ext_data.ether_type = eth->h_proto;
834 break;
835 case VIRTCHNL_PROTO_HDR_IPV4:
836 iph = (struct iphdr *)hdr->buffer;
837 l3 = VIRTCHNL_PROTO_HDR_IPV4;
838 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
839
840 if (hdr->field_selector) {
841 input->ip.v4.src_ip = iph->saddr;
842 input->ip.v4.dst_ip = iph->daddr;
843 input->ip.v4.tos = iph->tos;
844 input->ip.v4.proto = iph->protocol;
845 }
846 break;
847 case VIRTCHNL_PROTO_HDR_IPV6:
848 ip6h = (struct ipv6hdr *)hdr->buffer;
849 l3 = VIRTCHNL_PROTO_HDR_IPV6;
850 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
851
852 if (hdr->field_selector) {
853 memcpy(input->ip.v6.src_ip,
854 ip6h->saddr.in6_u.u6_addr8,
855 sizeof(ip6h->saddr));
856 memcpy(input->ip.v6.dst_ip,
857 ip6h->daddr.in6_u.u6_addr8,
858 sizeof(ip6h->daddr));
859 input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
860 (ip6h->flow_lbl[0] >> 4);
861 input->ip.v6.proto = ip6h->nexthdr;
862 }
863 break;
864 case VIRTCHNL_PROTO_HDR_TCP:
865 tcph = (struct tcphdr *)hdr->buffer;
866 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
867 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
868 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
869 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
870
871 if (hdr->field_selector) {
872 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
873 input->ip.v4.src_port = tcph->source;
874 input->ip.v4.dst_port = tcph->dest;
875 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
876 input->ip.v6.src_port = tcph->source;
877 input->ip.v6.dst_port = tcph->dest;
878 }
879 }
880 break;
881 case VIRTCHNL_PROTO_HDR_UDP:
882 udph = (struct udphdr *)hdr->buffer;
883 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
884 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
885 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
886 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
887
888 if (hdr->field_selector) {
889 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
890 input->ip.v4.src_port = udph->source;
891 input->ip.v4.dst_port = udph->dest;
892 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
893 input->ip.v6.src_port = udph->source;
894 input->ip.v6.dst_port = udph->dest;
895 }
896 }
897 break;
898 case VIRTCHNL_PROTO_HDR_SCTP:
899 sctph = (struct sctphdr *)hdr->buffer;
900 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
901 input->flow_type =
902 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
903 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
904 input->flow_type =
905 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
906
907 if (hdr->field_selector) {
908 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
909 input->ip.v4.src_port = sctph->source;
910 input->ip.v4.dst_port = sctph->dest;
911 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
912 input->ip.v6.src_port = sctph->source;
913 input->ip.v6.dst_port = sctph->dest;
914 }
915 }
916 break;
917 case VIRTCHNL_PROTO_HDR_L2TPV3:
918 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
919 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
920 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
921 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
922
923 if (hdr->field_selector)
924 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
925 break;
926 case VIRTCHNL_PROTO_HDR_ESP:
927 esph = (struct ip_esp_hdr *)hdr->buffer;
928 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
929 l4 == VIRTCHNL_PROTO_HDR_UDP)
930 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
931 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
932 l4 == VIRTCHNL_PROTO_HDR_UDP)
933 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
934 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
935 l4 == VIRTCHNL_PROTO_HDR_NONE)
936 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
937 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
938 l4 == VIRTCHNL_PROTO_HDR_NONE)
939 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
940
941 if (l4 == VIRTCHNL_PROTO_HDR_UDP)
942 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
943 else
944 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
945
946 if (hdr->field_selector) {
947 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
948 input->ip.v4.sec_parm_idx = esph->spi;
949 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
950 input->ip.v6.sec_parm_idx = esph->spi;
951 }
952 break;
953 case VIRTCHNL_PROTO_HDR_AH:
954 ah = (struct ip_auth_hdr *)hdr->buffer;
955 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
956 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
957 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
958 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
959
960 if (hdr->field_selector) {
961 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
962 input->ip.v4.sec_parm_idx = ah->spi;
963 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
964 input->ip.v6.sec_parm_idx = ah->spi;
965 }
966 break;
967 case VIRTCHNL_PROTO_HDR_PFCP:
968 rawh = (u8 *)hdr->buffer;
969 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
970 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
971 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
972 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
973 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
974 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
975 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
976 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
977 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
978
979 if (hdr->field_selector) {
980 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
981 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
982 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
983 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
984 }
985 break;
986 case VIRTCHNL_PROTO_HDR_GTPU_IP:
987 rawh = (u8 *)hdr->buffer;
988 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
989
990 if (hdr->field_selector)
991 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
992 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
993 break;
994 case VIRTCHNL_PROTO_HDR_GTPU_EH:
995 rawh = (u8 *)hdr->buffer;
996
997 if (hdr->field_selector)
998 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
999 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1000 break;
1001 default:
1002 dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1003 hdr->type, vf->vf_id);
1004 return -EINVAL;
1005 }
1006 }
1007
1008 return 0;
1009}
1010
1011/**
1012 * ice_vc_fdir_parse_action
1013 * @vf: pointer to the VF info
1014 * @fltr: virtual channel add cmd buffer
1015 * @conf: FDIR configuration for each filter
1016 *
1017 * Parse the virtual channel filter's action and store them into conf
1018 *
1019 * Return: 0 on success, and other on error.
1020 */
1021static int
1022ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1023 struct virtchnl_fdir_fltr_conf *conf)
1024{
1025 struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1026 struct device *dev = ice_pf_to_dev(vf->pf);
1027 struct ice_fdir_fltr *input = &conf->input;
1028 u32 dest_num = 0;
1029 u32 mark_num = 0;
1030 int i;
1031
1032 if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1033 dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1034 as->count, vf->vf_id);
1035 return -EINVAL;
1036 }
1037
1038 for (i = 0; i < as->count; i++) {
1039 struct virtchnl_filter_action *action = &as->actions[i];
1040
1041 switch (action->type) {
1042 case VIRTCHNL_ACTION_PASSTHRU:
1043 dest_num++;
1044 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1045 break;
1046 case VIRTCHNL_ACTION_DROP:
1047 dest_num++;
1048 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1049 break;
1050 case VIRTCHNL_ACTION_QUEUE:
1051 dest_num++;
1052 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1053 input->q_index = action->act_conf.queue.index;
1054 break;
1055 case VIRTCHNL_ACTION_Q_REGION:
1056 dest_num++;
1057 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1058 input->q_index = action->act_conf.queue.index;
1059 input->q_region = action->act_conf.queue.region;
1060 break;
1061 case VIRTCHNL_ACTION_MARK:
1062 mark_num++;
1063 input->fltr_id = action->act_conf.mark_id;
1064 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1065 break;
1066 default:
1067 dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1068 action->type, vf->vf_id);
1069 return -EINVAL;
1070 }
1071 }
1072
1073 if (dest_num == 0 || dest_num >= 2) {
1074 dev_dbg(dev, "Invalid destination action for VF %d\n",
1075 vf->vf_id);
1076 return -EINVAL;
1077 }
1078
1079 if (mark_num >= 2) {
1080 dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
1087/**
1088 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1089 * @vf: pointer to the VF info
1090 * @fltr: virtual channel add cmd buffer
1091 * @conf: FDIR configuration for each filter
1092 *
1093 * Return: 0 on success, and other on error.
1094 */
1095static int
1096ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1097 struct virtchnl_fdir_fltr_conf *conf)
1098{
1099 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1100 int ret;
1101
1102 if (!ice_vc_validate_pattern(vf, proto))
1103 return -EINVAL;
1104
1105 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1106 if (ret)
1107 return ret;
1108
1109 return ice_vc_fdir_parse_action(vf, fltr, conf);
1110}
1111
1112/**
1113 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1114 * @conf_a: FDIR configuration for filter a
1115 * @conf_b: FDIR configuration for filter b
1116 *
1117 * Return: 0 on success, and other on error.
1118 */
1119static bool
1120ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1121 struct virtchnl_fdir_fltr_conf *conf_b)
1122{
1123 struct ice_fdir_fltr *a = &conf_a->input;
1124 struct ice_fdir_fltr *b = &conf_b->input;
1125
1126 if (conf_a->ttype != conf_b->ttype)
1127 return false;
1128 if (a->flow_type != b->flow_type)
1129 return false;
1130 if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1131 return false;
1132 if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1133 return false;
1134 if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1135 return false;
1136 if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1137 return false;
1138 if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1139 return false;
1140 if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1141 return false;
1142 if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1143 return false;
1144 if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1145 return false;
1146
1147 return true;
1148}
1149
1150/**
1151 * ice_vc_fdir_is_dup_fltr
1152 * @vf: pointer to the VF info
1153 * @conf: FDIR configuration for each filter
1154 *
1155 * Check if there is duplicated rule with same conf value
1156 *
1157 * Return: 0 true success, and false on error.
1158 */
1159static bool
1160ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1161{
1162 struct ice_fdir_fltr *desc;
1163 bool ret;
1164
1165 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1166 struct virtchnl_fdir_fltr_conf *node =
1167 to_fltr_conf_from_desc(desc);
1168
1169 ret = ice_vc_fdir_comp_rules(node, conf);
1170 if (ret)
1171 return true;
1172 }
1173
1174 return false;
1175}
1176
1177/**
1178 * ice_vc_fdir_insert_entry
1179 * @vf: pointer to the VF info
1180 * @conf: FDIR configuration for each filter
1181 * @id: pointer to ID value allocated by driver
1182 *
1183 * Insert FDIR conf entry into list and allocate ID for this filter
1184 *
1185 * Return: 0 true success, and other on error.
1186 */
1187static int
1188ice_vc_fdir_insert_entry(struct ice_vf *vf,
1189 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1190{
1191 struct ice_fdir_fltr *input = &conf->input;
1192 int i;
1193
1194 /* alloc ID corresponding with conf */
1195 i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1196 ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1197 if (i < 0)
1198 return -EINVAL;
1199 *id = i;
1200
1201 list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1202 return 0;
1203}
1204
1205/**
1206 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1207 * @vf: pointer to the VF info
1208 * @conf: FDIR configuration for each filter
1209 * @id: filter rule's ID
1210 */
1211static void
1212ice_vc_fdir_remove_entry(struct ice_vf *vf,
1213 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1214{
1215 struct ice_fdir_fltr *input = &conf->input;
1216
1217 idr_remove(&vf->fdir.fdir_rule_idr, id);
1218 list_del(&input->fltr_node);
1219}
1220
1221/**
1222 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1223 * @vf: pointer to the VF info
1224 * @id: filter rule's ID
1225 *
1226 * Return: NULL on error, and other on success.
1227 */
1228static struct virtchnl_fdir_fltr_conf *
1229ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1230{
1231 return idr_find(&vf->fdir.fdir_rule_idr, id);
1232}
1233
1234/**
1235 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1236 * @vf: pointer to the VF info
1237 */
1238static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1239{
1240 struct virtchnl_fdir_fltr_conf *conf;
1241 struct ice_fdir_fltr *desc, *temp;
1242
1243 list_for_each_entry_safe(desc, temp,
1244 &vf->fdir.fdir_rule_list, fltr_node) {
1245 conf = to_fltr_conf_from_desc(desc);
1246 list_del(&desc->fltr_node);
1247 devm_kfree(ice_pf_to_dev(vf->pf), conf);
1248 }
1249}
1250
1251/**
1252 * ice_vc_fdir_write_fltr - write filter rule into hardware
1253 * @vf: pointer to the VF info
1254 * @conf: FDIR configuration for each filter
1255 * @add: true implies add rule, false implies del rules
1256 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1257 *
1258 * Return: 0 on success, and other on error.
1259 */
1260static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1261 struct virtchnl_fdir_fltr_conf *conf,
1262 bool add, bool is_tun)
1263{
1264 struct ice_fdir_fltr *input = &conf->input;
1265 struct ice_vsi *vsi, *ctrl_vsi;
1266 struct ice_fltr_desc desc;
1267 struct device *dev;
1268 struct ice_pf *pf;
1269 struct ice_hw *hw;
1270 int ret;
1271 u8 *pkt;
1272
1273 pf = vf->pf;
1274 dev = ice_pf_to_dev(pf);
1275 hw = &pf->hw;
1276 vsi = ice_get_vf_vsi(vf);
1277 if (!vsi) {
1278 dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1279 return -EINVAL;
1280 }
1281
1282 input->dest_vsi = vsi->idx;
1283 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1284
1285 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1286 if (!ctrl_vsi) {
1287 dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1288 return -EINVAL;
1289 }
1290
1291 pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1292 if (!pkt)
1293 return -ENOMEM;
1294
1295 ice_fdir_get_prgm_desc(hw, input, &desc, add);
1296 ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1297 if (ret) {
1298 dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1299 vf->vf_id, input->flow_type);
1300 goto err_free_pkt;
1301 }
1302
1303 ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1304 if (ret)
1305 goto err_free_pkt;
1306
1307 return 0;
1308
1309err_free_pkt:
1310 devm_kfree(dev, pkt);
1311 return ret;
1312}
1313
1314/**
1315 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1316 * @t: pointer to timer_list
1317 */
1318static void ice_vf_fdir_timer(struct timer_list *t)
1319{
1320 struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1321 struct ice_vf_fdir_ctx *ctx_done;
1322 struct ice_vf_fdir *fdir;
1323 unsigned long flags;
1324 struct ice_vf *vf;
1325 struct ice_pf *pf;
1326
1327 fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1328 vf = container_of(fdir, struct ice_vf, fdir);
1329 ctx_done = &fdir->ctx_done;
1330 pf = vf->pf;
1331 spin_lock_irqsave(&fdir->ctx_lock, flags);
1332 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1333 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1334 WARN_ON_ONCE(1);
1335 return;
1336 }
1337
1338 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1339
1340 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1341 ctx_done->conf = ctx_irq->conf;
1342 ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1343 ctx_done->v_opcode = ctx_irq->v_opcode;
1344 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1345
1346 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1347 ice_service_task_schedule(pf);
1348}
1349
1350/**
1351 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1352 * @ctrl_vsi: pointer to a VF's CTRL VSI
1353 * @rx_desc: pointer to FDIR Rx queue descriptor
1354 */
1355void
1356ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1357 union ice_32b_rx_flex_desc *rx_desc)
1358{
1359 struct ice_pf *pf = ctrl_vsi->back;
1360 struct ice_vf *vf = ctrl_vsi->vf;
1361 struct ice_vf_fdir_ctx *ctx_done;
1362 struct ice_vf_fdir_ctx *ctx_irq;
1363 struct ice_vf_fdir *fdir;
1364 unsigned long flags;
1365 struct device *dev;
1366 int ret;
1367
1368 if (WARN_ON(!vf))
1369 return;
1370
1371 fdir = &vf->fdir;
1372 ctx_done = &fdir->ctx_done;
1373 ctx_irq = &fdir->ctx_irq;
1374 dev = ice_pf_to_dev(pf);
1375 spin_lock_irqsave(&fdir->ctx_lock, flags);
1376 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1377 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1378 WARN_ON_ONCE(1);
1379 return;
1380 }
1381
1382 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1383
1384 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1385 ctx_done->conf = ctx_irq->conf;
1386 ctx_done->stat = ICE_FDIR_CTX_IRQ;
1387 ctx_done->v_opcode = ctx_irq->v_opcode;
1388 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1389 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1390
1391 ret = del_timer(&ctx_irq->rx_tmr);
1392 if (!ret)
1393 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1394
1395 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1396 ice_service_task_schedule(pf);
1397}
1398
1399/**
1400 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1401 * @vf: pointer to the VF info
1402 */
1403static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1404{
1405 u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1406 struct ice_vsi *vf_vsi;
1407 struct device *dev;
1408 struct ice_pf *pf;
1409 struct ice_hw *hw;
1410 u16 vsi_num;
1411
1412 pf = vf->pf;
1413 hw = &pf->hw;
1414 dev = ice_pf_to_dev(pf);
1415 vf_vsi = ice_get_vf_vsi(vf);
1416 if (!vf_vsi) {
1417 dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1418 return;
1419 }
1420
1421 vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1422
1423 fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1424 fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1425 switch (hw->mac_type) {
1426 case ICE_MAC_E830:
1427 fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1428 fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1429 fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1430 fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1431 break;
1432 case ICE_MAC_E810:
1433 default:
1434 fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1435 fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1436 fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1437 fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1438 }
1439
1440 dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1441 vf->vf_id, fd_size_g, fd_size_b);
1442 dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1443 vf->vf_id, fd_cnt_g, fd_cnt_b);
1444}
1445
1446/**
1447 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1448 * @vf: pointer to the VF info
1449 * @ctx: FDIR context info for post processing
1450 * @status: virtchnl FDIR program status
1451 *
1452 * Return: 0 on success, and other on error.
1453 */
1454static int
1455ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1456 enum virtchnl_fdir_prgm_status *status)
1457{
1458 struct device *dev = ice_pf_to_dev(vf->pf);
1459 u32 stat_err, error, prog_id;
1460 int ret;
1461
1462 stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1463 if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1464 ICE_FXD_FLTR_WB_QW1_DD_YES) {
1465 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1466 dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1467 ret = -EINVAL;
1468 goto err_exit;
1469 }
1470
1471 prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1472 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1473 ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1474 dev_err(dev, "VF %d: Desc show add, but ctx not",
1475 vf->vf_id);
1476 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1477 ret = -EINVAL;
1478 goto err_exit;
1479 }
1480
1481 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1482 ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1483 dev_err(dev, "VF %d: Desc show del, but ctx not",
1484 vf->vf_id);
1485 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1486 ret = -EINVAL;
1487 goto err_exit;
1488 }
1489
1490 error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1491 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1492 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1493 dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1494 vf->vf_id);
1495 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1496 } else {
1497 dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1498 vf->vf_id);
1499 *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1500 }
1501 ret = -EINVAL;
1502 goto err_exit;
1503 }
1504
1505 error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1506 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1507 dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1508 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1509 ret = -EINVAL;
1510 goto err_exit;
1511 }
1512
1513 *status = VIRTCHNL_FDIR_SUCCESS;
1514
1515 return 0;
1516
1517err_exit:
1518 ice_vf_fdir_dump_info(vf);
1519 return ret;
1520}
1521
1522/**
1523 * ice_vc_add_fdir_fltr_post
1524 * @vf: pointer to the VF structure
1525 * @ctx: FDIR context info for post processing
1526 * @status: virtchnl FDIR program status
1527 * @success: true implies success, false implies failure
1528 *
1529 * Post process for flow director add command. If success, then do post process
1530 * and send back success msg by virtchnl. Otherwise, do context reversion and
1531 * send back failure msg by virtchnl.
1532 *
1533 * Return: 0 on success, and other on error.
1534 */
1535static int
1536ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1537 enum virtchnl_fdir_prgm_status status,
1538 bool success)
1539{
1540 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1541 struct device *dev = ice_pf_to_dev(vf->pf);
1542 enum virtchnl_status_code v_ret;
1543 struct virtchnl_fdir_add *resp;
1544 int ret, len, is_tun;
1545
1546 v_ret = VIRTCHNL_STATUS_SUCCESS;
1547 len = sizeof(*resp);
1548 resp = kzalloc(len, GFP_KERNEL);
1549 if (!resp) {
1550 len = 0;
1551 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1552 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1553 goto err_exit;
1554 }
1555
1556 if (!success)
1557 goto err_exit;
1558
1559 is_tun = 0;
1560 resp->status = status;
1561 resp->flow_id = conf->flow_id;
1562 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1563
1564 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1565 (u8 *)resp, len);
1566 kfree(resp);
1567
1568 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1569 vf->vf_id, conf->flow_id,
1570 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1571 "add" : "del");
1572 return ret;
1573
1574err_exit:
1575 if (resp)
1576 resp->status = status;
1577 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1578 devm_kfree(dev, conf);
1579
1580 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1581 (u8 *)resp, len);
1582 kfree(resp);
1583 return ret;
1584}
1585
1586/**
1587 * ice_vc_del_fdir_fltr_post
1588 * @vf: pointer to the VF structure
1589 * @ctx: FDIR context info for post processing
1590 * @status: virtchnl FDIR program status
1591 * @success: true implies success, false implies failure
1592 *
1593 * Post process for flow director del command. If success, then do post process
1594 * and send back success msg by virtchnl. Otherwise, do context reversion and
1595 * send back failure msg by virtchnl.
1596 *
1597 * Return: 0 on success, and other on error.
1598 */
1599static int
1600ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1601 enum virtchnl_fdir_prgm_status status,
1602 bool success)
1603{
1604 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1605 struct device *dev = ice_pf_to_dev(vf->pf);
1606 enum virtchnl_status_code v_ret;
1607 struct virtchnl_fdir_del *resp;
1608 int ret, len, is_tun;
1609
1610 v_ret = VIRTCHNL_STATUS_SUCCESS;
1611 len = sizeof(*resp);
1612 resp = kzalloc(len, GFP_KERNEL);
1613 if (!resp) {
1614 len = 0;
1615 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1616 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1617 goto err_exit;
1618 }
1619
1620 if (!success)
1621 goto err_exit;
1622
1623 is_tun = 0;
1624 resp->status = status;
1625 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1626 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1627
1628 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1629 (u8 *)resp, len);
1630 kfree(resp);
1631
1632 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1633 vf->vf_id, conf->flow_id,
1634 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1635 "add" : "del");
1636 devm_kfree(dev, conf);
1637 return ret;
1638
1639err_exit:
1640 if (resp)
1641 resp->status = status;
1642 if (success)
1643 devm_kfree(dev, conf);
1644
1645 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1646 (u8 *)resp, len);
1647 kfree(resp);
1648 return ret;
1649}
1650
1651/**
1652 * ice_flush_fdir_ctx
1653 * @pf: pointer to the PF structure
1654 *
1655 * Flush all the pending event on ctx_done list and process them.
1656 */
1657void ice_flush_fdir_ctx(struct ice_pf *pf)
1658{
1659 struct ice_vf *vf;
1660 unsigned int bkt;
1661
1662 if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1663 return;
1664
1665 mutex_lock(&pf->vfs.table_lock);
1666 ice_for_each_vf(pf, bkt, vf) {
1667 struct device *dev = ice_pf_to_dev(pf);
1668 enum virtchnl_fdir_prgm_status status;
1669 struct ice_vf_fdir_ctx *ctx;
1670 unsigned long flags;
1671 int ret;
1672
1673 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1674 continue;
1675
1676 if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1677 continue;
1678
1679 ctx = &vf->fdir.ctx_done;
1680 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1681 if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1682 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1683 continue;
1684 }
1685 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1686
1687 WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1688 if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1689 status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1690 dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1691 vf->vf_id);
1692 goto err_exit;
1693 }
1694
1695 ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1696 if (ret)
1697 goto err_exit;
1698
1699 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1700 ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1701 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1702 ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1703 else
1704 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1705
1706 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1707 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1708 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1709 continue;
1710err_exit:
1711 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1712 ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1713 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1714 ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1715 else
1716 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1717
1718 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1719 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1720 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1721 }
1722 mutex_unlock(&pf->vfs.table_lock);
1723}
1724
1725/**
1726 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1727 * @vf: pointer to the VF structure
1728 * @conf: FDIR configuration for each filter
1729 * @v_opcode: virtual channel operation code
1730 *
1731 * Return: 0 on success, and other on error.
1732 */
1733static int
1734ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1735 enum virtchnl_ops v_opcode)
1736{
1737 struct device *dev = ice_pf_to_dev(vf->pf);
1738 struct ice_vf_fdir_ctx *ctx;
1739 unsigned long flags;
1740
1741 ctx = &vf->fdir.ctx_irq;
1742 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1743 if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1744 (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1745 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1746 dev_dbg(dev, "VF %d: Last request is still in progress\n",
1747 vf->vf_id);
1748 return -EBUSY;
1749 }
1750 ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1751 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1752
1753 ctx->conf = conf;
1754 ctx->v_opcode = v_opcode;
1755 ctx->stat = ICE_FDIR_CTX_READY;
1756 timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1757
1758 mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1759
1760 return 0;
1761}
1762
1763/**
1764 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1765 * @vf: pointer to the VF structure
1766 *
1767 * Return: 0 on success, and other on error.
1768 */
1769static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1770{
1771 struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1772 unsigned long flags;
1773
1774 del_timer(&ctx->rx_tmr);
1775 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1776 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1777 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1778}
1779
1780/**
1781 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1782 * @vf: pointer to the VF info
1783 * @msg: pointer to the msg buffer
1784 *
1785 * Return: 0 on success, and other on error.
1786 */
1787int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1788{
1789 struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1790 struct virtchnl_fdir_add *stat = NULL;
1791 struct virtchnl_fdir_fltr_conf *conf;
1792 enum virtchnl_status_code v_ret;
1793 struct device *dev;
1794 struct ice_pf *pf;
1795 int is_tun = 0;
1796 int len = 0;
1797 int ret;
1798
1799 pf = vf->pf;
1800 dev = ice_pf_to_dev(pf);
1801 ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1802 if (ret) {
1803 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1804 dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1805 goto err_exit;
1806 }
1807
1808 ret = ice_vf_start_ctrl_vsi(vf);
1809 if (ret && (ret != -EEXIST)) {
1810 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1811 dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1812 vf->vf_id, ret);
1813 goto err_exit;
1814 }
1815
1816 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1817 if (!stat) {
1818 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1819 dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1820 goto err_exit;
1821 }
1822
1823 conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1824 if (!conf) {
1825 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1826 dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1827 goto err_exit;
1828 }
1829
1830 len = sizeof(*stat);
1831 ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1832 if (ret) {
1833 v_ret = VIRTCHNL_STATUS_SUCCESS;
1834 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1835 dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1836 goto err_free_conf;
1837 }
1838
	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

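	/* Success path: for a programmed rule the final reply to the VF is
	 * deferred to ice_vc_add_fdir_fltr_post(), which runs once the
	 * completion interrupt or the timeout fires.
	 */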
exit:
	kfree(stat);
	return ret;

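	/* Error unwind: undo the setup steps in reverse order, then report
	 * the failure back to the VF.
	 */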
err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete an FDIR filter for the VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

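	/* Success: the reply is sent later from ice_vc_del_fdir_fltr_post()
	 * once the completion interrupt or the timeout fires.
	 */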
	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
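	/* Drop all rule entries first, then tear down the HW flow profiles
	 * and free the profile bookkeeping.
	 */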
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021-2023, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_base.h"
6#include "ice_lib.h"
7#include "ice_flow.h"
8#include "ice_vf_lib_private.h"
9
10#define to_fltr_conf_from_desc(p) \
11 container_of(p, struct virtchnl_fdir_fltr_conf, input)
12
13#define GTPU_TEID_OFFSET 4
14#define GTPU_EH_QFI_OFFSET 1
15#define GTPU_EH_QFI_MASK 0x3F
16#define PFCP_S_OFFSET 0
17#define PFCP_S_MASK 0x1
18#define PFCP_PORT_NR 8805
19
20#define FDIR_INSET_FLAG_ESP_S 0
21#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
22#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
23#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
24
25enum ice_fdir_tunnel_type {
26 ICE_FDIR_TUNNEL_TYPE_NONE = 0,
27 ICE_FDIR_TUNNEL_TYPE_GTPU,
28 ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
29};
30
31struct virtchnl_fdir_fltr_conf {
32 struct ice_fdir_fltr input;
33 enum ice_fdir_tunnel_type ttype;
34 u64 inset_flag;
35 u32 flow_id;
36};
37
38struct virtchnl_fdir_inset_map {
39 enum virtchnl_proto_hdr_field field;
40 enum ice_flow_field fld;
41 u64 flag;
42 u64 mask;
43};
44
45static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
46 {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
47 {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
48 {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
49 {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
50 {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
51 {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
52 {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
53 {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
54 {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
55 {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
56 {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
57 {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
58 {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
59 {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
60 {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
61 {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
62 {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
63 {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
64 {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
65 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
66 FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
67 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
68 FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
69 {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
70 {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
71 {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
72};
73
74/**
75 * ice_vc_fdir_param_check
76 * @vf: pointer to the VF structure
77 * @vsi_id: VF relative VSI ID
78 *
79 * Check for the valid VSI ID, PF's state and VF's state
80 *
81 * Return: 0 on success, and -EINVAL on error.
82 */
83static int
84ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
85{
86 struct ice_pf *pf = vf->pf;
87
88 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
89 return -EINVAL;
90
91 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
92 return -EINVAL;
93
94 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
95 return -EINVAL;
96
97 if (vsi_id != vf->lan_vsi_num)
98 return -EINVAL;
99
100 if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
101 return -EINVAL;
102
103 if (!ice_get_vf_vsi(vf))
104 return -EINVAL;
105
106 return 0;
107}
108
109/**
110 * ice_vf_start_ctrl_vsi
111 * @vf: pointer to the VF structure
112 *
113 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
114 *
115 * Return: 0 on success, and other on error.
116 */
117static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
118{
119 struct ice_pf *pf = vf->pf;
120 struct ice_vsi *ctrl_vsi;
121 struct device *dev;
122 int err;
123
124 dev = ice_pf_to_dev(pf);
125 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
126 return -EEXIST;
127
128 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
129 if (!ctrl_vsi) {
130 dev_dbg(dev, "Could not setup control VSI for VF %d\n",
131 vf->vf_id);
132 return -ENOMEM;
133 }
134
135 err = ice_vsi_open_ctrl(ctrl_vsi);
136 if (err) {
137 dev_dbg(dev, "Could not open control VSI for VF %d\n",
138 vf->vf_id);
139 goto err_vsi_open;
140 }
141
142 return 0;
143
144err_vsi_open:
145 ice_vsi_release(ctrl_vsi);
146 if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
147 pf->vsi[vf->ctrl_vsi_idx] = NULL;
148 vf->ctrl_vsi_idx = ICE_NO_VSI;
149 }
150 return err;
151}
152
153/**
154 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
155 * @vf: pointer to the VF structure
156 * @flow: filter flow type
157 *
158 * Return: 0 on success, and other on error.
159 */
160static int
161ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
162{
163 struct ice_vf_fdir *fdir = &vf->fdir;
164
165 if (!fdir->fdir_prof) {
166 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
167 ICE_FLTR_PTYPE_MAX,
168 sizeof(*fdir->fdir_prof),
169 GFP_KERNEL);
170 if (!fdir->fdir_prof)
171 return -ENOMEM;
172 }
173
174 if (!fdir->fdir_prof[flow]) {
175 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
176 sizeof(**fdir->fdir_prof),
177 GFP_KERNEL);
178 if (!fdir->fdir_prof[flow])
179 return -ENOMEM;
180 }
181
182 return 0;
183}
184
185/**
186 * ice_vc_fdir_free_prof - free profile for this filter flow type
187 * @vf: pointer to the VF structure
188 * @flow: filter flow type
189 */
190static void
191ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
192{
193 struct ice_vf_fdir *fdir = &vf->fdir;
194
195 if (!fdir->fdir_prof)
196 return;
197
198 if (!fdir->fdir_prof[flow])
199 return;
200
201 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
202 fdir->fdir_prof[flow] = NULL;
203}
204
205/**
206 * ice_vc_fdir_free_prof_all - free all the profile for this VF
207 * @vf: pointer to the VF structure
208 */
209static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
210{
211 struct ice_vf_fdir *fdir = &vf->fdir;
212 enum ice_fltr_ptype flow;
213
214 if (!fdir->fdir_prof)
215 return;
216
217 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
218 ice_vc_fdir_free_prof(vf, flow);
219
220 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
221 fdir->fdir_prof = NULL;
222}
223
224/**
225 * ice_vc_fdir_parse_flow_fld
226 * @proto_hdr: virtual channel protocol filter header
227 * @conf: FDIR configuration for each filter
228 * @fld: field type array
229 * @fld_cnt: field counter
230 *
231 * Parse the virtual channel filter header and store them into field type array
232 *
233 * Return: 0 on success, and other on error.
234 */
235static int
236ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
237 struct virtchnl_fdir_fltr_conf *conf,
238 enum ice_flow_field *fld, int *fld_cnt)
239{
240 struct virtchnl_proto_hdr hdr;
241 u32 i;
242
243 memcpy(&hdr, proto_hdr, sizeof(hdr));
244
245 for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
246 VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
247 if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
248 if (fdir_inset_map[i].mask &&
249 ((fdir_inset_map[i].mask & conf->inset_flag) !=
250 fdir_inset_map[i].flag))
251 continue;
252
253 fld[*fld_cnt] = fdir_inset_map[i].fld;
254 *fld_cnt += 1;
255 if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
256 return -EINVAL;
257 VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
258 fdir_inset_map[i].field);
259 }
260
261 return 0;
262}
263
264/**
265 * ice_vc_fdir_set_flow_fld
266 * @vf: pointer to the VF structure
267 * @fltr: virtual channel add cmd buffer
268 * @conf: FDIR configuration for each filter
269 * @seg: array of one or more packet segments that describe the flow
270 *
271 * Parse the virtual channel add msg buffer's field vector and store them into
272 * flow's packet segment field
273 *
274 * Return: 0 on success, and other on error.
275 */
276static int
277ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
278 struct virtchnl_fdir_fltr_conf *conf,
279 struct ice_flow_seg_info *seg)
280{
281 struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
282 enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
283 struct device *dev = ice_pf_to_dev(vf->pf);
284 struct virtchnl_proto_hdrs *proto;
285 int fld_cnt = 0;
286 int i;
287
288 proto = &rule->proto_hdrs;
289 for (i = 0; i < proto->count; i++) {
290 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
291 int ret;
292
293 ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
294 if (ret)
295 return ret;
296 }
297
298 if (fld_cnt == 0) {
299 dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
300 return -EINVAL;
301 }
302
303 for (i = 0; i < fld_cnt; i++)
304 ice_flow_set_fld(seg, fld[i],
305 ICE_FLOW_FLD_OFF_INVAL,
306 ICE_FLOW_FLD_OFF_INVAL,
307 ICE_FLOW_FLD_OFF_INVAL, false);
308
309 return 0;
310}
311
312/**
313 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
314 * @vf: pointer to the VF structure
315 * @conf: FDIR configuration for each filter
316 * @seg: array of one or more packet segments that describe the flow
317 *
318 * Return: 0 on success, and other on error.
319 */
320static int
321ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
322 struct virtchnl_fdir_fltr_conf *conf,
323 struct ice_flow_seg_info *seg)
324{
325 enum ice_fltr_ptype flow = conf->input.flow_type;
326 enum ice_fdir_tunnel_type ttype = conf->ttype;
327 struct device *dev = ice_pf_to_dev(vf->pf);
328
329 switch (flow) {
330 case ICE_FLTR_PTYPE_NON_IP_L2:
331 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
332 break;
333 case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
334 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
335 ICE_FLOW_SEG_HDR_IPV4 |
336 ICE_FLOW_SEG_HDR_IPV_OTHER);
337 break;
338 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
339 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
340 ICE_FLOW_SEG_HDR_IPV4 |
341 ICE_FLOW_SEG_HDR_IPV_OTHER);
342 break;
343 case ICE_FLTR_PTYPE_NONF_IPV4_AH:
344 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
345 ICE_FLOW_SEG_HDR_IPV4 |
346 ICE_FLOW_SEG_HDR_IPV_OTHER);
347 break;
348 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
349 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
350 ICE_FLOW_SEG_HDR_IPV4 |
351 ICE_FLOW_SEG_HDR_IPV_OTHER);
352 break;
353 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
354 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
355 ICE_FLOW_SEG_HDR_IPV4 |
356 ICE_FLOW_SEG_HDR_IPV_OTHER);
357 break;
358 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
359 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
360 ICE_FLOW_SEG_HDR_IPV4 |
361 ICE_FLOW_SEG_HDR_IPV_OTHER);
362 break;
363 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
364 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
365 ICE_FLOW_SEG_HDR_IPV_OTHER);
366 break;
367 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
368 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
369 ICE_FLOW_SEG_HDR_IPV4 |
370 ICE_FLOW_SEG_HDR_IPV_OTHER);
371 break;
372 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
373 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
374 ICE_FLOW_SEG_HDR_IPV4 |
375 ICE_FLOW_SEG_HDR_IPV_OTHER);
376 break;
377 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
378 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
379 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
380 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
381 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
382 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
383 ICE_FLOW_SEG_HDR_IPV4 |
384 ICE_FLOW_SEG_HDR_IPV_OTHER);
385 } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
386 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
387 ICE_FLOW_SEG_HDR_GTPU_IP |
388 ICE_FLOW_SEG_HDR_IPV4 |
389 ICE_FLOW_SEG_HDR_IPV_OTHER);
390 } else {
391 dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
392 flow, vf->vf_id);
393 return -EINVAL;
394 }
395 break;
396 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
397 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
398 ICE_FLOW_SEG_HDR_IPV4 |
399 ICE_FLOW_SEG_HDR_IPV_OTHER);
400 break;
401 case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
402 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
403 ICE_FLOW_SEG_HDR_IPV6 |
404 ICE_FLOW_SEG_HDR_IPV_OTHER);
405 break;
406 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
407 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
408 ICE_FLOW_SEG_HDR_IPV6 |
409 ICE_FLOW_SEG_HDR_IPV_OTHER);
410 break;
411 case ICE_FLTR_PTYPE_NONF_IPV6_AH:
412 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
413 ICE_FLOW_SEG_HDR_IPV6 |
414 ICE_FLOW_SEG_HDR_IPV_OTHER);
415 break;
416 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
417 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
418 ICE_FLOW_SEG_HDR_IPV6 |
419 ICE_FLOW_SEG_HDR_IPV_OTHER);
420 break;
421 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
422 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
423 ICE_FLOW_SEG_HDR_IPV6 |
424 ICE_FLOW_SEG_HDR_IPV_OTHER);
425 break;
426 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
427 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
428 ICE_FLOW_SEG_HDR_IPV6 |
429 ICE_FLOW_SEG_HDR_IPV_OTHER);
430 break;
431 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
432 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
433 ICE_FLOW_SEG_HDR_IPV_OTHER);
434 break;
435 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
436 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
437 ICE_FLOW_SEG_HDR_IPV6 |
438 ICE_FLOW_SEG_HDR_IPV_OTHER);
439 break;
440 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
441 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
442 ICE_FLOW_SEG_HDR_IPV6 |
443 ICE_FLOW_SEG_HDR_IPV_OTHER);
444 break;
445 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
446 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
447 ICE_FLOW_SEG_HDR_IPV6 |
448 ICE_FLOW_SEG_HDR_IPV_OTHER);
449 break;
450 default:
451 dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
452 flow, vf->vf_id);
453 return -EINVAL;
454 }
455
456 return 0;
457}
458
459/**
460 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
461 * @vf: pointer to the VF structure
462 * @flow: filter flow type
463 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
464 */
465static void
466ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
467{
468 struct ice_vf_fdir *fdir = &vf->fdir;
469 struct ice_fd_hw_prof *vf_prof;
470 struct ice_pf *pf = vf->pf;
471 struct ice_vsi *vf_vsi;
472 struct device *dev;
473 struct ice_hw *hw;
474 u64 prof_id;
475 int i;
476
477 dev = ice_pf_to_dev(pf);
478 hw = &pf->hw;
479 if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
480 return;
481
482 vf_prof = fdir->fdir_prof[flow];
483 prof_id = vf_prof->prof_id[tun];
484
485 vf_vsi = ice_get_vf_vsi(vf);
486 if (!vf_vsi) {
487 dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
488 return;
489 }
490
491 if (!fdir->prof_entry_cnt[flow][tun])
492 return;
493
494 for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
495 if (vf_prof->entry_h[i][tun]) {
496 u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
497
498 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
499 ice_flow_rem_entry(hw, ICE_BLK_FD,
500 vf_prof->entry_h[i][tun]);
501 vf_prof->entry_h[i][tun] = 0;
502 }
503
504 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
505 devm_kfree(dev, vf_prof->fdir_seg[tun]);
506 vf_prof->fdir_seg[tun] = NULL;
507
508 for (i = 0; i < vf_prof->cnt; i++)
509 vf_prof->vsi_h[i] = 0;
510
511 fdir->prof_entry_cnt[flow][tun] = 0;
512}
513
514/**
515 * ice_vc_fdir_rem_prof_all - remove profile for this VF
516 * @vf: pointer to the VF structure
517 */
518static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
519{
520 enum ice_fltr_ptype flow;
521
522 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
523 flow < ICE_FLTR_PTYPE_MAX; flow++) {
524 ice_vc_fdir_rem_prof(vf, flow, 0);
525 ice_vc_fdir_rem_prof(vf, flow, 1);
526 }
527}
528
529/**
530 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
531 * @fdir: pointer to the VF FDIR structure
532 */
533static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
534{
535 enum ice_fltr_ptype flow;
536
537 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
538 flow < ICE_FLTR_PTYPE_MAX; flow++) {
539 fdir->fdir_fltr_cnt[flow][0] = 0;
540 fdir->fdir_fltr_cnt[flow][1] = 0;
541 }
542}
543
544/**
545 * ice_vc_fdir_has_prof_conflict
546 * @vf: pointer to the VF structure
547 * @conf: FDIR configuration for each filter
548 *
549 * Check if @conf has conflicting profile with existing profiles
550 *
551 * Return: true on success, and false on error.
552 */
553static bool
554ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
555 struct virtchnl_fdir_fltr_conf *conf)
556{
557 struct ice_fdir_fltr *desc;
558
559 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
560 struct virtchnl_fdir_fltr_conf *existing_conf;
561 enum ice_fltr_ptype flow_type_a, flow_type_b;
562 struct ice_fdir_fltr *a, *b;
563
564 existing_conf = to_fltr_conf_from_desc(desc);
565 a = &existing_conf->input;
566 b = &conf->input;
567 flow_type_a = a->flow_type;
568 flow_type_b = b->flow_type;
569
570 /* No need to compare two rules with different tunnel types or
571 * with the same protocol type.
572 */
573 if (existing_conf->ttype != conf->ttype ||
574 flow_type_a == flow_type_b)
575 continue;
576
577 switch (flow_type_a) {
578 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
579 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
580 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
581 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
582 return true;
583 break;
584 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
585 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
586 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
587 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
588 return true;
589 break;
590 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
591 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
592 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
593 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
594 return true;
595 break;
596 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
597 if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
598 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
599 flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
600 return true;
601 break;
602 default:
603 break;
604 }
605 }
606
607 return false;
608}
609
610/**
611 * ice_vc_fdir_write_flow_prof
612 * @vf: pointer to the VF structure
613 * @flow: filter flow type
614 * @seg: array of one or more packet segments that describe the flow
615 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
616 *
617 * Write the flow's profile config and packet segment into the hardware
618 *
619 * Return: 0 on success, and other on error.
620 */
621static int
622ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
623 struct ice_flow_seg_info *seg, int tun)
624{
625 struct ice_vf_fdir *fdir = &vf->fdir;
626 struct ice_vsi *vf_vsi, *ctrl_vsi;
627 struct ice_flow_seg_info *old_seg;
628 struct ice_flow_prof *prof = NULL;
629 struct ice_fd_hw_prof *vf_prof;
630 struct device *dev;
631 struct ice_pf *pf;
632 struct ice_hw *hw;
633 u64 entry1_h = 0;
634 u64 entry2_h = 0;
635 int ret;
636
637 pf = vf->pf;
638 dev = ice_pf_to_dev(pf);
639 hw = &pf->hw;
640 vf_vsi = ice_get_vf_vsi(vf);
641 if (!vf_vsi)
642 return -EINVAL;
643
644 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
645 if (!ctrl_vsi)
646 return -EINVAL;
647
648 vf_prof = fdir->fdir_prof[flow];
649 old_seg = vf_prof->fdir_seg[tun];
650 if (old_seg) {
651 if (!memcmp(old_seg, seg, sizeof(*seg))) {
652 dev_dbg(dev, "Duplicated profile for VF %d!\n",
653 vf->vf_id);
654 return -EEXIST;
655 }
656
657 if (fdir->fdir_fltr_cnt[flow][tun]) {
658 ret = -EINVAL;
659 dev_dbg(dev, "Input set conflicts for VF %d\n",
660 vf->vf_id);
661 goto err_exit;
662 }
663
664 /* remove previously allocated profile */
665 ice_vc_fdir_rem_prof(vf, flow, tun);
666 }
667
668 ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
669 tun + 1, false, &prof);
670 if (ret) {
671 dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
672 flow, vf->vf_id);
673 goto err_exit;
674 }
675
676 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
677 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
678 seg, &entry1_h);
679 if (ret) {
680 dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
681 flow, vf->vf_id);
682 goto err_prof;
683 }
684
685 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
686 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
687 seg, &entry2_h);
688 if (ret) {
689 dev_dbg(dev,
690 "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
691 flow, vf->vf_id);
692 goto err_entry_1;
693 }
694
695 vf_prof->fdir_seg[tun] = seg;
696 vf_prof->cnt = 0;
697 fdir->prof_entry_cnt[flow][tun] = 0;
698
699 vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
700 vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
701 vf_prof->cnt++;
702 fdir->prof_entry_cnt[flow][tun]++;
703
704 vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
705 vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
706 vf_prof->cnt++;
707 fdir->prof_entry_cnt[flow][tun]++;
708
709 vf_prof->prof_id[tun] = prof->id;
710
711 return 0;
712
713err_entry_1:
714 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
715 ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
716 ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
717err_prof:
718 ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
719err_exit:
720 return ret;
721}
722
723/**
724 * ice_vc_fdir_config_input_set
725 * @vf: pointer to the VF structure
726 * @fltr: virtual channel add cmd buffer
727 * @conf: FDIR configuration for each filter
728 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
729 *
730 * Config the input set type and value for virtual channel add msg buffer
731 *
732 * Return: 0 on success, and other on error.
733 */
734static int
735ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
736 struct virtchnl_fdir_fltr_conf *conf, int tun)
737{
738 struct ice_fdir_fltr *input = &conf->input;
739 struct device *dev = ice_pf_to_dev(vf->pf);
740 struct ice_flow_seg_info *seg;
741 enum ice_fltr_ptype flow;
742 int ret;
743
744 ret = ice_vc_fdir_has_prof_conflict(vf, conf);
745 if (ret) {
746 dev_dbg(dev, "Found flow profile conflict for VF %d\n",
747 vf->vf_id);
748 return ret;
749 }
750
751 flow = input->flow_type;
752 ret = ice_vc_fdir_alloc_prof(vf, flow);
753 if (ret) {
754 dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
755 return ret;
756 }
757
758 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
759 if (!seg)
760 return -ENOMEM;
761
762 ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
763 if (ret) {
764 dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
765 goto err_exit;
766 }
767
768 ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
769 if (ret) {
770 dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
771 goto err_exit;
772 }
773
774 ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
775 if (ret == -EEXIST) {
776 devm_kfree(dev, seg);
777 } else if (ret) {
778 dev_dbg(dev, "Write flow profile for VF %d failed\n",
779 vf->vf_id);
780 goto err_exit;
781 }
782
783 return 0;
784
785err_exit:
786 devm_kfree(dev, seg);
787 return ret;
788}
789
790/**
791 * ice_vc_fdir_parse_pattern
792 * @vf: pointer to the VF info
793 * @fltr: virtual channel add cmd buffer
794 * @conf: FDIR configuration for each filter
795 *
796 * Parse the virtual channel filter's pattern and store them into conf
797 *
798 * Return: 0 on success, and other on error.
799 */
800static int
801ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
802 struct virtchnl_fdir_fltr_conf *conf)
803{
804 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
805 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
806 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
807 struct device *dev = ice_pf_to_dev(vf->pf);
808 struct ice_fdir_fltr *input = &conf->input;
809 int i;
810
811 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
812 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
813 proto->count, vf->vf_id);
814 return -EINVAL;
815 }
816
817 for (i = 0; i < proto->count; i++) {
818 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
819 struct ip_esp_hdr *esph;
820 struct ip_auth_hdr *ah;
821 struct sctphdr *sctph;
822 struct ipv6hdr *ip6h;
823 struct udphdr *udph;
824 struct tcphdr *tcph;
825 struct ethhdr *eth;
826 struct iphdr *iph;
827 u8 s_field;
828 u8 *rawh;
829
830 switch (hdr->type) {
831 case VIRTCHNL_PROTO_HDR_ETH:
832 eth = (struct ethhdr *)hdr->buffer;
833 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
834
835 if (hdr->field_selector)
836 input->ext_data.ether_type = eth->h_proto;
837 break;
838 case VIRTCHNL_PROTO_HDR_IPV4:
839 iph = (struct iphdr *)hdr->buffer;
840 l3 = VIRTCHNL_PROTO_HDR_IPV4;
841 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
842
843 if (hdr->field_selector) {
844 input->ip.v4.src_ip = iph->saddr;
845 input->ip.v4.dst_ip = iph->daddr;
846 input->ip.v4.tos = iph->tos;
847 input->ip.v4.proto = iph->protocol;
848 }
849 break;
850 case VIRTCHNL_PROTO_HDR_IPV6:
851 ip6h = (struct ipv6hdr *)hdr->buffer;
852 l3 = VIRTCHNL_PROTO_HDR_IPV6;
853 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
854
855 if (hdr->field_selector) {
856 memcpy(input->ip.v6.src_ip,
857 ip6h->saddr.in6_u.u6_addr8,
858 sizeof(ip6h->saddr));
859 memcpy(input->ip.v6.dst_ip,
860 ip6h->daddr.in6_u.u6_addr8,
861 sizeof(ip6h->daddr));
862 input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
863 (ip6h->flow_lbl[0] >> 4);
864 input->ip.v6.proto = ip6h->nexthdr;
865 }
866 break;
867 case VIRTCHNL_PROTO_HDR_TCP:
868 tcph = (struct tcphdr *)hdr->buffer;
869 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
870 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
871 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
872 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
873
874 if (hdr->field_selector) {
875 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
876 input->ip.v4.src_port = tcph->source;
877 input->ip.v4.dst_port = tcph->dest;
878 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
879 input->ip.v6.src_port = tcph->source;
880 input->ip.v6.dst_port = tcph->dest;
881 }
882 }
883 break;
884 case VIRTCHNL_PROTO_HDR_UDP:
885 udph = (struct udphdr *)hdr->buffer;
886 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
887 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
888 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
889 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
890
891 if (hdr->field_selector) {
892 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
893 input->ip.v4.src_port = udph->source;
894 input->ip.v4.dst_port = udph->dest;
895 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
896 input->ip.v6.src_port = udph->source;
897 input->ip.v6.dst_port = udph->dest;
898 }
899 }
900 break;
901 case VIRTCHNL_PROTO_HDR_SCTP:
902 sctph = (struct sctphdr *)hdr->buffer;
903 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
904 input->flow_type =
905 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
906 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
907 input->flow_type =
908 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
909
910 if (hdr->field_selector) {
911 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
912 input->ip.v4.src_port = sctph->source;
913 input->ip.v4.dst_port = sctph->dest;
914 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
915 input->ip.v6.src_port = sctph->source;
916 input->ip.v6.dst_port = sctph->dest;
917 }
918 }
919 break;
920 case VIRTCHNL_PROTO_HDR_L2TPV3:
921 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
922 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
923 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
924 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
925
926 if (hdr->field_selector)
927 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
928 break;
929 case VIRTCHNL_PROTO_HDR_ESP:
930 esph = (struct ip_esp_hdr *)hdr->buffer;
931 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
932 l4 == VIRTCHNL_PROTO_HDR_UDP)
933 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
934 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
935 l4 == VIRTCHNL_PROTO_HDR_UDP)
936 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
937 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
938 l4 == VIRTCHNL_PROTO_HDR_NONE)
939 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
940 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
941 l4 == VIRTCHNL_PROTO_HDR_NONE)
942 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
943
944 if (l4 == VIRTCHNL_PROTO_HDR_UDP)
945 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
946 else
947 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
948
949 if (hdr->field_selector) {
950 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
951 input->ip.v4.sec_parm_idx = esph->spi;
952 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
953 input->ip.v6.sec_parm_idx = esph->spi;
954 }
955 break;
956 case VIRTCHNL_PROTO_HDR_AH:
957 ah = (struct ip_auth_hdr *)hdr->buffer;
958 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
959 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
960 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
961 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
962
963 if (hdr->field_selector) {
964 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
965 input->ip.v4.sec_parm_idx = ah->spi;
966 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
967 input->ip.v6.sec_parm_idx = ah->spi;
968 }
969 break;
970 case VIRTCHNL_PROTO_HDR_PFCP:
971 rawh = (u8 *)hdr->buffer;
972 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
973 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
974 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
975 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
976 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
977 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
978 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
979 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
980 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
981
982 if (hdr->field_selector) {
983 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
984 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
985 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
986 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
987 }
988 break;
989 case VIRTCHNL_PROTO_HDR_GTPU_IP:
990 rawh = (u8 *)hdr->buffer;
991 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
992
993 if (hdr->field_selector)
994 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
995 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
996 break;
997 case VIRTCHNL_PROTO_HDR_GTPU_EH:
998 rawh = (u8 *)hdr->buffer;
999
1000 if (hdr->field_selector)
1001 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1002 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1003 break;
1004 default:
1005 dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1006 hdr->type, vf->vf_id);
1007 return -EINVAL;
1008 }
1009 }
1010
1011 return 0;
1012}
1013
1014/**
1015 * ice_vc_fdir_parse_action
1016 * @vf: pointer to the VF info
1017 * @fltr: virtual channel add cmd buffer
1018 * @conf: FDIR configuration for each filter
1019 *
1020 * Parse the virtual channel filter's action and store them into conf
1021 *
1022 * Return: 0 on success, and other on error.
1023 */
1024static int
1025ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1026 struct virtchnl_fdir_fltr_conf *conf)
1027{
1028 struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1029 struct device *dev = ice_pf_to_dev(vf->pf);
1030 struct ice_fdir_fltr *input = &conf->input;
1031 u32 dest_num = 0;
1032 u32 mark_num = 0;
1033 int i;
1034
1035 if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1036 dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1037 as->count, vf->vf_id);
1038 return -EINVAL;
1039 }
1040
1041 for (i = 0; i < as->count; i++) {
1042 struct virtchnl_filter_action *action = &as->actions[i];
1043
1044 switch (action->type) {
1045 case VIRTCHNL_ACTION_PASSTHRU:
1046 dest_num++;
1047 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1048 break;
1049 case VIRTCHNL_ACTION_DROP:
1050 dest_num++;
1051 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1052 break;
1053 case VIRTCHNL_ACTION_QUEUE:
1054 dest_num++;
1055 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1056 input->q_index = action->act_conf.queue.index;
1057 break;
1058 case VIRTCHNL_ACTION_Q_REGION:
1059 dest_num++;
1060 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1061 input->q_index = action->act_conf.queue.index;
1062 input->q_region = action->act_conf.queue.region;
1063 break;
1064 case VIRTCHNL_ACTION_MARK:
1065 mark_num++;
1066 input->fltr_id = action->act_conf.mark_id;
1067 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1068 break;
1069 default:
1070 dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1071 action->type, vf->vf_id);
1072 return -EINVAL;
1073 }
1074 }
1075
1076 if (dest_num == 0 || dest_num >= 2) {
1077 dev_dbg(dev, "Invalid destination action for VF %d\n",
1078 vf->vf_id);
1079 return -EINVAL;
1080 }
1081
1082 if (mark_num >= 2) {
1083 dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1084 return -EINVAL;
1085 }
1086
1087 return 0;
1088}
1089
1090/**
1091 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1092 * @vf: pointer to the VF info
1093 * @fltr: virtual channel add cmd buffer
1094 * @conf: FDIR configuration for each filter
1095 *
1096 * Return: 0 on success, and other on error.
1097 */
1098static int
1099ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1100 struct virtchnl_fdir_fltr_conf *conf)
1101{
1102 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1103 int ret;
1104
1105 if (!ice_vc_validate_pattern(vf, proto))
1106 return -EINVAL;
1107
1108 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1109 if (ret)
1110 return ret;
1111
1112 return ice_vc_fdir_parse_action(vf, fltr, conf);
1113}
1114
1115/**
1116 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1117 * @conf_a: FDIR configuration for filter a
1118 * @conf_b: FDIR configuration for filter b
1119 *
1120 * Return: 0 on success, and other on error.
1121 */
1122static bool
1123ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1124 struct virtchnl_fdir_fltr_conf *conf_b)
1125{
1126 struct ice_fdir_fltr *a = &conf_a->input;
1127 struct ice_fdir_fltr *b = &conf_b->input;
1128
1129 if (conf_a->ttype != conf_b->ttype)
1130 return false;
1131 if (a->flow_type != b->flow_type)
1132 return false;
1133 if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1134 return false;
1135 if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1136 return false;
1137 if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1138 return false;
1139 if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1140 return false;
1141 if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1142 return false;
1143 if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1144 return false;
1145 if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1146 return false;
1147 if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1148 return false;
1149
1150 return true;
1151}
1152
1153/**
1154 * ice_vc_fdir_is_dup_fltr
1155 * @vf: pointer to the VF info
1156 * @conf: FDIR configuration for each filter
1157 *
1158 * Check if there is duplicated rule with same conf value
1159 *
1160 * Return: 0 true success, and false on error.
1161 */
1162static bool
1163ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1164{
1165 struct ice_fdir_fltr *desc;
1166 bool ret;
1167
1168 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1169 struct virtchnl_fdir_fltr_conf *node =
1170 to_fltr_conf_from_desc(desc);
1171
1172 ret = ice_vc_fdir_comp_rules(node, conf);
1173 if (ret)
1174 return true;
1175 }
1176
1177 return false;
1178}
1179
1180/**
1181 * ice_vc_fdir_insert_entry
1182 * @vf: pointer to the VF info
1183 * @conf: FDIR configuration for each filter
1184 * @id: pointer to ID value allocated by driver
1185 *
1186 * Insert FDIR conf entry into list and allocate ID for this filter
1187 *
1188 * Return: 0 true success, and other on error.
1189 */
1190static int
1191ice_vc_fdir_insert_entry(struct ice_vf *vf,
1192 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1193{
1194 struct ice_fdir_fltr *input = &conf->input;
1195 int i;
1196
1197 /* alloc ID corresponding with conf */
1198 i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1199 ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1200 if (i < 0)
1201 return -EINVAL;
1202 *id = i;
1203
1204 list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1205 return 0;
1206}
1207
1208/**
1209 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1210 * @vf: pointer to the VF info
1211 * @conf: FDIR configuration for each filter
1212 * @id: filter rule's ID
1213 */
1214static void
1215ice_vc_fdir_remove_entry(struct ice_vf *vf,
1216 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1217{
1218 struct ice_fdir_fltr *input = &conf->input;
1219
1220 idr_remove(&vf->fdir.fdir_rule_idr, id);
1221 list_del(&input->fltr_node);
1222}
1223
1224/**
1225 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1226 * @vf: pointer to the VF info
1227 * @id: filter rule's ID
1228 *
1229 * Return: NULL on error, and other on success.
1230 */
1231static struct virtchnl_fdir_fltr_conf *
1232ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1233{
1234 return idr_find(&vf->fdir.fdir_rule_idr, id);
1235}
1236
1237/**
1238 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1239 * @vf: pointer to the VF info
1240 */
1241static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1242{
1243 struct virtchnl_fdir_fltr_conf *conf;
1244 struct ice_fdir_fltr *desc, *temp;
1245
1246 list_for_each_entry_safe(desc, temp,
1247 &vf->fdir.fdir_rule_list, fltr_node) {
1248 conf = to_fltr_conf_from_desc(desc);
1249 list_del(&desc->fltr_node);
1250 devm_kfree(ice_pf_to_dev(vf->pf), conf);
1251 }
1252}
1253
1254/**
1255 * ice_vc_fdir_write_fltr - write filter rule into hardware
1256 * @vf: pointer to the VF info
1257 * @conf: FDIR configuration for each filter
1258 * @add: true implies add rule, false implies del rules
1259 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1260 *
1261 * Return: 0 on success, and other on error.
1262 */
1263static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1264 struct virtchnl_fdir_fltr_conf *conf,
1265 bool add, bool is_tun)
1266{
1267 struct ice_fdir_fltr *input = &conf->input;
1268 struct ice_vsi *vsi, *ctrl_vsi;
1269 struct ice_fltr_desc desc;
1270 struct device *dev;
1271 struct ice_pf *pf;
1272 struct ice_hw *hw;
1273 int ret;
1274 u8 *pkt;
1275
1276 pf = vf->pf;
1277 dev = ice_pf_to_dev(pf);
1278 hw = &pf->hw;
1279 vsi = ice_get_vf_vsi(vf);
1280 if (!vsi) {
1281 dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1282 return -EINVAL;
1283 }
1284
1285 input->dest_vsi = vsi->idx;
1286 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1287
1288 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1289 if (!ctrl_vsi) {
1290 dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1291 return -EINVAL;
1292 }
1293
1294 pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1295 if (!pkt)
1296 return -ENOMEM;
1297
1298 ice_fdir_get_prgm_desc(hw, input, &desc, add);
1299 ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1300 if (ret) {
1301 dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1302 vf->vf_id, input->flow_type);
1303 goto err_free_pkt;
1304 }
1305
1306 ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1307 if (ret)
1308 goto err_free_pkt;
1309
1310 return 0;
1311
1312err_free_pkt:
1313 devm_kfree(dev, pkt);
1314 return ret;
1315}
1316
1317/**
1318 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1319 * @t: pointer to timer_list
1320 */
1321static void ice_vf_fdir_timer(struct timer_list *t)
1322{
1323 struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1324 struct ice_vf_fdir_ctx *ctx_done;
1325 struct ice_vf_fdir *fdir;
1326 unsigned long flags;
1327 struct ice_vf *vf;
1328 struct ice_pf *pf;
1329
1330 fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1331 vf = container_of(fdir, struct ice_vf, fdir);
1332 ctx_done = &fdir->ctx_done;
1333 pf = vf->pf;
1334 spin_lock_irqsave(&fdir->ctx_lock, flags);
1335 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1336 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1337 WARN_ON_ONCE(1);
1338 return;
1339 }
1340
1341 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1342
1343 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1344 ctx_done->conf = ctx_irq->conf;
1345 ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1346 ctx_done->v_opcode = ctx_irq->v_opcode;
1347 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1348
1349 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1350 ice_service_task_schedule(pf);
1351}
1352
1353/**
1354 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1355 * @ctrl_vsi: pointer to a VF's CTRL VSI
1356 * @rx_desc: pointer to FDIR Rx queue descriptor
1357 */
1358void
1359ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1360 union ice_32b_rx_flex_desc *rx_desc)
1361{
1362 struct ice_pf *pf = ctrl_vsi->back;
1363 struct ice_vf *vf = ctrl_vsi->vf;
1364 struct ice_vf_fdir_ctx *ctx_done;
1365 struct ice_vf_fdir_ctx *ctx_irq;
1366 struct ice_vf_fdir *fdir;
1367 unsigned long flags;
1368 struct device *dev;
1369 int ret;
1370
1371 if (WARN_ON(!vf))
1372 return;
1373
1374 fdir = &vf->fdir;
1375 ctx_done = &fdir->ctx_done;
1376 ctx_irq = &fdir->ctx_irq;
1377 dev = ice_pf_to_dev(pf);
1378 spin_lock_irqsave(&fdir->ctx_lock, flags);
1379 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1380 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1381 WARN_ON_ONCE(1);
1382 return;
1383 }
1384
1385 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1386
1387 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1388 ctx_done->conf = ctx_irq->conf;
1389 ctx_done->stat = ICE_FDIR_CTX_IRQ;
1390 ctx_done->v_opcode = ctx_irq->v_opcode;
1391 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1392 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1393
1394 ret = del_timer(&ctx_irq->rx_tmr);
1395 if (!ret)
1396 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1397
1398 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1399 ice_service_task_schedule(pf);
1400}
1401
1402/**
1403 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1404 * @vf: pointer to the VF info
1405 */
1406static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1407{
1408 u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1409 struct ice_vsi *vf_vsi;
1410 struct device *dev;
1411 struct ice_pf *pf;
1412 struct ice_hw *hw;
1413 u16 vsi_num;
1414
1415 pf = vf->pf;
1416 hw = &pf->hw;
1417 dev = ice_pf_to_dev(pf);
1418 vf_vsi = ice_get_vf_vsi(vf);
1419 if (!vf_vsi) {
1420 dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1421 return;
1422 }
1423
1424 vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1425
1426 fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1427 fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1428 switch (hw->mac_type) {
1429 case ICE_MAC_E830:
1430 fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1431 fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1432 fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1433 fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1434 break;
1435 case ICE_MAC_E810:
1436 default:
1437 fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1438 fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1439 fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1440 fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1441 }
1442
1443 dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1444 vf->vf_id, fd_size_g, fd_size_b);
1445 dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1446 vf->vf_id, fd_cnt_g, fd_cnt_b);
1447}
1448
1449/**
1450 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1451 * @vf: pointer to the VF info
1452 * @ctx: FDIR context info for post processing
1453 * @status: virtchnl FDIR program status
1454 *
1455 * Return: 0 on success, and other on error.
1456 */
1457static int
1458ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1459 enum virtchnl_fdir_prgm_status *status)
1460{
1461 struct device *dev = ice_pf_to_dev(vf->pf);
1462 u32 stat_err, error, prog_id;
1463 int ret;
1464
1465 stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1466 if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1467 ICE_FXD_FLTR_WB_QW1_DD_YES) {
1468 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1469 dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1470 ret = -EINVAL;
1471 goto err_exit;
1472 }
1473
1474 prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1475 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1476 ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1477 dev_err(dev, "VF %d: Desc show add, but ctx not",
1478 vf->vf_id);
1479 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1480 ret = -EINVAL;
1481 goto err_exit;
1482 }
1483
1484 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1485 ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1486 dev_err(dev, "VF %d: Desc show del, but ctx not",
1487 vf->vf_id);
1488 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1489 ret = -EINVAL;
1490 goto err_exit;
1491 }
1492
1493 error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1494 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1495 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1496 dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1497 vf->vf_id);
1498 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1499 } else {
1500 dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1501 vf->vf_id);
1502 *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1503 }
1504 ret = -EINVAL;
1505 goto err_exit;
1506 }
1507
1508 error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1509 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1510 dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1511 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1512 ret = -EINVAL;
1513 goto err_exit;
1514 }
1515
1516 *status = VIRTCHNL_FDIR_SUCCESS;
1517
1518 return 0;
1519
1520err_exit:
1521 ice_vf_fdir_dump_info(vf);
1522 return ret;
1523}
1524
1525/**
1526 * ice_vc_add_fdir_fltr_post
1527 * @vf: pointer to the VF structure
1528 * @ctx: FDIR context info for post processing
1529 * @status: virtchnl FDIR program status
1530 * @success: true implies success, false implies failure
1531 *
1532 * Post process for flow director add command. If success, then do post process
1533 * and send back success msg by virtchnl. Otherwise, do context reversion and
1534 * send back failure msg by virtchnl.
1535 *
1536 * Return: 0 on success, and other on error.
1537 */
1538static int
1539ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1540 enum virtchnl_fdir_prgm_status status,
1541 bool success)
1542{
1543 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1544 struct device *dev = ice_pf_to_dev(vf->pf);
1545 enum virtchnl_status_code v_ret;
1546 struct virtchnl_fdir_add *resp;
1547 int ret, len, is_tun;
1548
1549 v_ret = VIRTCHNL_STATUS_SUCCESS;
1550 len = sizeof(*resp);
1551 resp = kzalloc(len, GFP_KERNEL);
1552 if (!resp) {
1553 len = 0;
1554 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1555 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1556 goto err_exit;
1557 }
1558
1559 if (!success)
1560 goto err_exit;
1561
1562 is_tun = 0;
1563 resp->status = status;
1564 resp->flow_id = conf->flow_id;
1565 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1566
1567 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1568 (u8 *)resp, len);
1569 kfree(resp);
1570
1571 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1572 vf->vf_id, conf->flow_id,
1573 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1574 "add" : "del");
1575 return ret;
1576
1577err_exit:
1578 if (resp)
1579 resp->status = status;
1580 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1581 devm_kfree(dev, conf);
1582
1583 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1584 (u8 *)resp, len);
1585 kfree(resp);
1586 return ret;
1587}
1588
1589/**
1590 * ice_vc_del_fdir_fltr_post
1591 * @vf: pointer to the VF structure
1592 * @ctx: FDIR context info for post processing
1593 * @status: virtchnl FDIR program status
1594 * @success: true implies success, false implies failure
1595 *
1596 * Post process for flow director del command. If success, then do post process
1597 * and send back success msg by virtchnl. Otherwise, do context reversion and
1598 * send back failure msg by virtchnl.
1599 *
1600 * Return: 0 on success, and other on error.
1601 */
1602static int
1603ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1604 enum virtchnl_fdir_prgm_status status,
1605 bool success)
1606{
1607 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1608 struct device *dev = ice_pf_to_dev(vf->pf);
1609 enum virtchnl_status_code v_ret;
1610 struct virtchnl_fdir_del *resp;
1611 int ret, len, is_tun;
1612
1613 v_ret = VIRTCHNL_STATUS_SUCCESS;
1614 len = sizeof(*resp);
1615 resp = kzalloc(len, GFP_KERNEL);
1616 if (!resp) {
1617 len = 0;
1618 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1619 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1620 goto err_exit;
1621 }
1622
1623 if (!success)
1624 goto err_exit;
1625
1626 is_tun = 0;
1627 resp->status = status;
1628 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1629 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1630
1631 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1632 (u8 *)resp, len);
1633 kfree(resp);
1634
1635 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1636 vf->vf_id, conf->flow_id,
1637 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1638 "add" : "del");
1639 devm_kfree(dev, conf);
1640 return ret;
1641
1642err_exit:
1643 if (resp)
1644 resp->status = status;
1645 if (success)
1646 devm_kfree(dev, conf);
1647
1648 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1649 (u8 *)resp, len);
1650 kfree(resp);
1651 return ret;
1652}
1653
1654/**
1655 * ice_flush_fdir_ctx
1656 * @pf: pointer to the PF structure
1657 *
1658 * Flush all the pending event on ctx_done list and process them.
1659 */
1660void ice_flush_fdir_ctx(struct ice_pf *pf)
1661{
1662 struct ice_vf *vf;
1663 unsigned int bkt;
1664
1665 if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1666 return;
1667
1668 mutex_lock(&pf->vfs.table_lock);
1669 ice_for_each_vf(pf, bkt, vf) {
1670 struct device *dev = ice_pf_to_dev(pf);
1671 enum virtchnl_fdir_prgm_status status;
1672 struct ice_vf_fdir_ctx *ctx;
1673 unsigned long flags;
1674 int ret;
1675
1676 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1677 continue;
1678
1679 if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1680 continue;
1681
1682 ctx = &vf->fdir.ctx_done;
1683 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1684 if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1685 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1686 continue;
1687 }
1688 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1689
1690 WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1691 if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1692 status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1693 dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1694 vf->vf_id);
1695 goto err_exit;
1696 }
1697
1698 ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1699 if (ret)
1700 goto err_exit;
1701
1702 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1703 ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1704 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1705 ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1706 else
1707 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1708
1709 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1710 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1711 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1712 continue;
1713err_exit:
1714 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1715 ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1716 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1717 ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1718 else
1719 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1720
1721 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1722 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1723 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1724 }
1725 mutex_unlock(&pf->vfs.table_lock);
1726}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, or -EBUSY when a previous request is still pending.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}
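
/*
 * Illustrative sketch (assumptions flagged below): the 10 ms guard timer
 * armed above needs a callback that fails the request if the programming
 * descriptor never comes back from the ctrl VSI.  The real callback is
 * ice_vf_fdir_timer() elsewhere in this file; the body below is a
 * simplified sketch of the idea, not the in-tree implementation:
 *
 *	static void example_fdir_timer(struct timer_list *t)
 *	{
 *		struct ice_vf_fdir_ctx *ctx = from_timer(ctx, t, rx_tmr);
 *
 *		// No IRQ arrived in time: publish a TIMEOUT context so
 *		// ice_flush_fdir_ctx() reports
 *		// VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT to the VF.
 *		ctx->stat = ICE_FDIR_CTX_TIMEOUT;
 *	}
 */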

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
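
/*
 * Illustrative sketch: set/clear bracket the hardware write, condensed
 * here from the add path below:
 *
 *	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
 *	if (ret)
 *		goto err_rem_entry;
 *	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
 *	if (ret)
 *		ice_vc_fdir_clear_irq_ctx(vf);	// undo the arm on failure
 */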

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for the VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
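
/*
 * Illustrative sketch (hypothetical VF-side code, not part of this file):
 * the request handled above is a struct virtchnl_fdir_add sent with
 * VIRTCHNL_OP_ADD_FDIR_FILTER.  Only the fields this handler reads are
 * shown; the rule description itself is elided:
 *
 *	struct virtchnl_fdir_add req = {};
 *
 *	req.vsi_id = vf_vsi_id;		// VF-relative VSI ID, checked by
 *					// ice_vc_fdir_param_check()
 *	req.validate_only = 1;		// dry-run: rule is validated but
 *					// never written to hardware
 *	// req.rule_cfg describes the match fields and action ...
 *
 * vf_vsi_id is a placeholder.  On a real (non-dry-run) add, the response
 * is expected to carry the flow_id used for later deletion.
 */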

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for the VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
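
/*
 * Illustrative sketch (hypothetical VF-side code): deletion identifies
 * the rule by the flow_id returned from a successful add, not by
 * restating the match fields:
 *
 *	struct virtchnl_fdir_del req = {};
 *
 *	req.vsi_id = vf_vsi_id;		// same VF-relative VSI ID as the add
 *	req.flow_id = saved_flow_id;	// handle from the add response
 *
 * vf_vsi_id and saved_flow_id are placeholders.  An unknown flow_id is
 * answered with VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST, as handled above.
 */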

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
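
/*
 * Illustrative sketch: ice_vf_fdir_init() and ice_vf_fdir_exit() are
 * expected to bracket the VF lifetime, e.g. (hypothetical call sites):
 *
 *	ice_vf_fdir_init(vf);	// on VF creation / resource setup
 *	...
 *	ice_vf_fdir_exit(vf);	// on teardown, before the VF is freed;
 *				// flushes rules and releases profiles
 */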