// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

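/* Each virtchnl filter config embeds its struct ice_fdir_fltr, so a list
 * node keyed on the embedded input can be mapped back to the enclosing
 * config with container_of().
 */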
#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
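/* Bit 0 of conf->inset_flag records how an ESP SPI was carried: set for
 * NAT-T ESP (ESP over UDP), clear for plain IPsec ESP. The same virtchnl
 * SPI field then selects a different ice flow field in fdir_inset_map[].
 */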

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

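/* Map of virtchnl header fields to ice flow fields. Entries with a non-zero
 * mask apply only when the filter's inset flags match, which is how the two
 * ESP mappings are told apart. PFCP has no flow field of its own and is
 * matched on the UDP destination port instead.
 */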
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, PF state, and VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate the ctrl_vsi for the first time and open its port for the VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

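	/* Walk the inset map while the copied header still advertises
	 * fields; skip entries whose inset flags do not match this filter,
	 * e.g. the NAT-T ESP mapping when parsing a plain IPsec ESP rule.
	 */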
	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store the
 * fields into the flow's packet segment
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];
	prof_id = vf_prof->prof_id[tun];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict exists, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

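		/* An IPv4/IPv6 "other" profile and the TCP/UDP/SCTP profiles
		 * of the same IP version cannot coexist for one VF, since
		 * the generic profile would also match the L4 traffic.
		 */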
		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				tun + 1, false, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

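	/* Add two flow entries per profile: one on the VF's data VSI and
	 * one on the control VSI, which carries the FDIR programming
	 * descriptors and their completion status.
	 */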
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->prof_id[tun] = prof->id;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for the virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

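	/* -EEXIST means an identical profile is already programmed; free
	 * the duplicate segment and treat the filter as configured.
	 */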
	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
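				/* Rebuild the traffic class from the
				 * priority field and the top nibble of the
				 * first flow-label byte.
				 */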
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* remember the L4 type so the ESP case below can
			 * tell NAT-T ESP (over UDP) from plain IPsec ESP
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
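		/* For PFCP, the S flag (bit 0 of the first octet) selects
		 * node vs. session messages; the match itself is done on
		 * the well-known UDP destination port 8805.
		 */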
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
			to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the filter config on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

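	/* Programming is done by queueing a filter programming descriptor
	 * together with a generated dummy training packet on the control
	 * VSI; completion is reported back via an Rx status descriptor.
	 */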
	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

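	/* The programming interrupt never arrived: hand the context over to
	 * ctx_done with a timeout status and let the service task report
	 * the failure back to the VF.
	 */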
	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, do the post
 * processing and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director del command. On success, do the post
 * processing and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

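	/* Arm a 10 ms watchdog; if no programming status descriptor arrives
	 * in time, ice_vf_fdir_timer() fails the request with a timeout.
	 */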
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
1907
1908/**
1909 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1910 * @vf: pointer to the VF info
1911 * @msg: pointer to the msg buffer
1912 *
1913 * Return: 0 on success, and other on error.
1914 */
1915int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1916{
1917 struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1918 struct virtchnl_fdir_del *stat = NULL;
1919 struct virtchnl_fdir_fltr_conf *conf;
1920 enum virtchnl_status_code v_ret;
1921 struct device *dev;
1922 struct ice_pf *pf;
1923 int is_tun = 0;
1924 int len = 0;
1925 int ret;
1926
1927 pf = vf->pf;
1928 dev = ice_pf_to_dev(pf);
1929 ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1930 if (ret) {
1931 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1932 dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1933 goto err_exit;
1934 }
1935
1936 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1937 if (!stat) {
1938 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1939 dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1940 goto err_exit;
1941 }
1942
1943 len = sizeof(*stat);
1944
1945 conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1946 if (!conf) {
1947 v_ret = VIRTCHNL_STATUS_SUCCESS;
1948 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1949 dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1950 vf->vf_id, fltr->flow_id);
1951 goto err_exit;
1952 }
1953
1954 /* Just return failure when ctrl_vsi idx is invalid */
1955 if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1956 v_ret = VIRTCHNL_STATUS_SUCCESS;
1957 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1958 dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1959 goto err_exit;
1960 }
1961
1962 ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1963 if (ret) {
1964 v_ret = VIRTCHNL_STATUS_SUCCESS;
1965 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1966 dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1967 goto err_exit;
1968 }
1969
1970 ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1971 if (ret) {
1972 v_ret = VIRTCHNL_STATUS_SUCCESS;
1973 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1974 dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1975 vf->vf_id, ret);
1976 goto err_del_tmr;
1977 }
1978
1979 kfree(stat);
1980
1981 return ret;
1982
1983err_del_tmr:
1984 ice_vc_fdir_clear_irq_ctx(vf);
1985err_exit:
1986 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1987 (u8 *)stat, len);
1988 kfree(stat);
1989 return ret;
1990}
1991
1992/**
1993 * ice_vf_fdir_init - init FDIR resource for VF
1994 * @vf: pointer to the VF info
1995 */
1996void ice_vf_fdir_init(struct ice_vf *vf)
1997{
1998 struct ice_vf_fdir *fdir = &vf->fdir;
1999
2000 idr_init(&fdir->fdir_rule_idr);
2001 INIT_LIST_HEAD(&fdir->fdir_rule_list);
2002
2003 spin_lock_init(&fdir->ctx_lock);
2004 fdir->ctx_irq.flags = 0;
2005 fdir->ctx_done.flags = 0;
2006 ice_vc_fdir_reset_cnt_all(fdir);
2007}
2008
2009/**
2010 * ice_vf_fdir_exit - destroy FDIR resource for VF
2011 * @vf: pointer to the VF info
2012 */
2013void ice_vf_fdir_exit(struct ice_vf *vf)
2014{
2015 ice_vc_fdir_flush_entry(vf);
2016 idr_destroy(&vf->fdir.fdir_rule_idr);
2017 ice_vc_fdir_rem_prof_all(vf);
2018 ice_vc_fdir_free_prof_all(vf);
2019}
11
12#define ICE_FLOW_PROF_TYPE_S 0
13#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
14#define ICE_FLOW_PROF_VSI_S 32
15#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
16
17/* Flow profile ID format:
18 * [0:31] - flow type, flow + tun_offs
19 * [32:63] - VSI index
20 */
21#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
22 ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
23 (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
24
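/* Illustrative example (hypothetical values): for VSI index 5 and flow
 * type 3 with no tunnel offset, ICE_FLOW_PROF_FD(5, 3, 0) yields
 * 0x0000000500000003 -- flow type in bits [0:31], VSI index in [32:63].
 */
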
50static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
51 VIRTCHNL_PROTO_HDR_ETH,
52 VIRTCHNL_PROTO_HDR_NONE,
53};
54
55static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
56 VIRTCHNL_PROTO_HDR_ETH,
57 VIRTCHNL_PROTO_HDR_IPV4,
58 VIRTCHNL_PROTO_HDR_NONE,
59};
60
61static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
62 VIRTCHNL_PROTO_HDR_ETH,
63 VIRTCHNL_PROTO_HDR_IPV4,
64 VIRTCHNL_PROTO_HDR_TCP,
65 VIRTCHNL_PROTO_HDR_NONE,
66};
67
68static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
69 VIRTCHNL_PROTO_HDR_ETH,
70 VIRTCHNL_PROTO_HDR_IPV4,
71 VIRTCHNL_PROTO_HDR_UDP,
72 VIRTCHNL_PROTO_HDR_NONE,
73};
74
75static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
76 VIRTCHNL_PROTO_HDR_ETH,
77 VIRTCHNL_PROTO_HDR_IPV4,
78 VIRTCHNL_PROTO_HDR_SCTP,
79 VIRTCHNL_PROTO_HDR_NONE,
80};
81
82static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
83 VIRTCHNL_PROTO_HDR_ETH,
84 VIRTCHNL_PROTO_HDR_IPV6,
85 VIRTCHNL_PROTO_HDR_NONE,
86};
87
88static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
89 VIRTCHNL_PROTO_HDR_ETH,
90 VIRTCHNL_PROTO_HDR_IPV6,
91 VIRTCHNL_PROTO_HDR_TCP,
92 VIRTCHNL_PROTO_HDR_NONE,
93};
94
95static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
96 VIRTCHNL_PROTO_HDR_ETH,
97 VIRTCHNL_PROTO_HDR_IPV6,
98 VIRTCHNL_PROTO_HDR_UDP,
99 VIRTCHNL_PROTO_HDR_NONE,
100};
101
102static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
103 VIRTCHNL_PROTO_HDR_ETH,
104 VIRTCHNL_PROTO_HDR_IPV6,
105 VIRTCHNL_PROTO_HDR_SCTP,
106 VIRTCHNL_PROTO_HDR_NONE,
107};
108
109static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
110 VIRTCHNL_PROTO_HDR_ETH,
111 VIRTCHNL_PROTO_HDR_IPV4,
112 VIRTCHNL_PROTO_HDR_UDP,
113 VIRTCHNL_PROTO_HDR_GTPU_IP,
114 VIRTCHNL_PROTO_HDR_NONE,
115};
116
117static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
118 VIRTCHNL_PROTO_HDR_ETH,
119 VIRTCHNL_PROTO_HDR_IPV4,
120 VIRTCHNL_PROTO_HDR_UDP,
121 VIRTCHNL_PROTO_HDR_GTPU_IP,
122 VIRTCHNL_PROTO_HDR_GTPU_EH,
123 VIRTCHNL_PROTO_HDR_NONE,
124};
125
126static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
127 VIRTCHNL_PROTO_HDR_ETH,
128 VIRTCHNL_PROTO_HDR_IPV4,
129 VIRTCHNL_PROTO_HDR_L2TPV3,
130 VIRTCHNL_PROTO_HDR_NONE,
131};
132
133static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
134 VIRTCHNL_PROTO_HDR_ETH,
135 VIRTCHNL_PROTO_HDR_IPV6,
136 VIRTCHNL_PROTO_HDR_L2TPV3,
137 VIRTCHNL_PROTO_HDR_NONE,
138};
139
140static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
141 VIRTCHNL_PROTO_HDR_ETH,
142 VIRTCHNL_PROTO_HDR_IPV4,
143 VIRTCHNL_PROTO_HDR_ESP,
144 VIRTCHNL_PROTO_HDR_NONE,
145};
146
147static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
148 VIRTCHNL_PROTO_HDR_ETH,
149 VIRTCHNL_PROTO_HDR_IPV6,
150 VIRTCHNL_PROTO_HDR_ESP,
151 VIRTCHNL_PROTO_HDR_NONE,
152};
153
154static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
155 VIRTCHNL_PROTO_HDR_ETH,
156 VIRTCHNL_PROTO_HDR_IPV4,
157 VIRTCHNL_PROTO_HDR_AH,
158 VIRTCHNL_PROTO_HDR_NONE,
159};
160
161static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
162 VIRTCHNL_PROTO_HDR_ETH,
163 VIRTCHNL_PROTO_HDR_IPV6,
164 VIRTCHNL_PROTO_HDR_AH,
165 VIRTCHNL_PROTO_HDR_NONE,
166};
167
168static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
169 VIRTCHNL_PROTO_HDR_ETH,
170 VIRTCHNL_PROTO_HDR_IPV4,
171 VIRTCHNL_PROTO_HDR_UDP,
172 VIRTCHNL_PROTO_HDR_ESP,
173 VIRTCHNL_PROTO_HDR_NONE,
174};
175
176static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
177 VIRTCHNL_PROTO_HDR_ETH,
178 VIRTCHNL_PROTO_HDR_IPV6,
179 VIRTCHNL_PROTO_HDR_UDP,
180 VIRTCHNL_PROTO_HDR_ESP,
181 VIRTCHNL_PROTO_HDR_NONE,
182};
183
184static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
185 VIRTCHNL_PROTO_HDR_ETH,
186 VIRTCHNL_PROTO_HDR_IPV4,
187 VIRTCHNL_PROTO_HDR_UDP,
188 VIRTCHNL_PROTO_HDR_PFCP,
189 VIRTCHNL_PROTO_HDR_NONE,
190};
191
192static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
193 VIRTCHNL_PROTO_HDR_ETH,
194 VIRTCHNL_PROTO_HDR_IPV6,
195 VIRTCHNL_PROTO_HDR_UDP,
196 VIRTCHNL_PROTO_HDR_PFCP,
197 VIRTCHNL_PROTO_HDR_NONE,
198};
199
200struct virtchnl_fdir_pattern_match_item {
201 enum virtchnl_proto_hdr_type *list;
202 u64 input_set;
203 u64 *meta;
204};
205
206static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
207 {vc_pattern_ipv4, 0, NULL},
208 {vc_pattern_ipv4_tcp, 0, NULL},
209 {vc_pattern_ipv4_udp, 0, NULL},
210 {vc_pattern_ipv4_sctp, 0, NULL},
211 {vc_pattern_ipv6, 0, NULL},
212 {vc_pattern_ipv6_tcp, 0, NULL},
213 {vc_pattern_ipv6_udp, 0, NULL},
214 {vc_pattern_ipv6_sctp, 0, NULL},
215};
216
217static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
218 {vc_pattern_ipv4, 0, NULL},
219 {vc_pattern_ipv4_tcp, 0, NULL},
220 {vc_pattern_ipv4_udp, 0, NULL},
221 {vc_pattern_ipv4_sctp, 0, NULL},
222 {vc_pattern_ipv6, 0, NULL},
223 {vc_pattern_ipv6_tcp, 0, NULL},
224 {vc_pattern_ipv6_udp, 0, NULL},
225 {vc_pattern_ipv6_sctp, 0, NULL},
226 {vc_pattern_ether, 0, NULL},
227 {vc_pattern_ipv4_gtpu, 0, NULL},
228 {vc_pattern_ipv4_gtpu_eh, 0, NULL},
229 {vc_pattern_ipv4_l2tpv3, 0, NULL},
230 {vc_pattern_ipv6_l2tpv3, 0, NULL},
231 {vc_pattern_ipv4_esp, 0, NULL},
232 {vc_pattern_ipv6_esp, 0, NULL},
233 {vc_pattern_ipv4_ah, 0, NULL},
234 {vc_pattern_ipv6_ah, 0, NULL},
235 {vc_pattern_ipv4_nat_t_esp, 0, NULL},
236 {vc_pattern_ipv6_nat_t_esp, 0, NULL},
237 {vc_pattern_ipv4_pfcp, 0, NULL},
238 {vc_pattern_ipv6_pfcp, 0, NULL},
239};
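
/* Which of the two tables above is enforced is chosen at run time by
 * ice_vc_fdir_get_pattern() from the active DDP package: the default OS
 * package allows only the basic IPv4/IPv6 L3/L4 patterns, while the
 * COMMS package additionally allows Ethernet, GTP-U, L2TPv3, ESP, AH,
 * NAT-T ESP and PFCP patterns.
 */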
240
312/**
313 * ice_vf_start_ctrl_vsi - allocate and open the control VSI for a VF
314 * @vf: pointer to the VF structure
315 *
316 * Allocate the ctrl_vsi on first use and open the ctrl_vsi port for the VF
317 *
318 * Return: 0 on success, and other on error.
319 */
320static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
321{
322 struct ice_pf *pf = vf->pf;
323 struct ice_vsi *ctrl_vsi;
324 struct device *dev;
325 int err;
326
327 dev = ice_pf_to_dev(pf);
328 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
329 return -EEXIST;
330
331 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
332 if (!ctrl_vsi) {
333 dev_dbg(dev, "Could not setup control VSI for VF %d\n",
334 vf->vf_id);
335 return -ENOMEM;
336 }
337
338 err = ice_vsi_open_ctrl(ctrl_vsi);
339 if (err) {
340 dev_dbg(dev, "Could not open control VSI for VF %d\n",
341 vf->vf_id);
342 goto err_vsi_open;
343 }
344
345 return 0;
346
347err_vsi_open:
348 ice_vsi_release(ctrl_vsi);
349 if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
350 pf->vsi[vf->ctrl_vsi_idx] = NULL;
351 vf->ctrl_vsi_idx = ICE_NO_VSI;
352 }
353 return err;
354}
355
356/**
357 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
358 * @vf: pointer to the VF structure
359 * @flow: filter flow type
360 *
361 * Return: 0 on success, and other on error.
362 */
363static int
364ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
365{
366 struct ice_vf_fdir *fdir = &vf->fdir;
367
368 if (!fdir->fdir_prof) {
369 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
370 ICE_FLTR_PTYPE_MAX,
371 sizeof(*fdir->fdir_prof),
372 GFP_KERNEL);
373 if (!fdir->fdir_prof)
374 return -ENOMEM;
375 }
376
377 if (!fdir->fdir_prof[flow]) {
378 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
379 sizeof(**fdir->fdir_prof),
380 GFP_KERNEL);
381 if (!fdir->fdir_prof[flow])
382 return -ENOMEM;
383 }
384
385 return 0;
386}
387
388/**
389 * ice_vc_fdir_free_prof - free profile for this filter flow type
390 * @vf: pointer to the VF structure
391 * @flow: filter flow type
392 */
393static void
394ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
395{
396 struct ice_vf_fdir *fdir = &vf->fdir;
397
398 if (!fdir->fdir_prof)
399 return;
400
401 if (!fdir->fdir_prof[flow])
402 return;
403
404 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
405 fdir->fdir_prof[flow] = NULL;
406}
407
408/**
409 * ice_vc_fdir_free_prof_all - free all the profile for this VF
410 * @vf: pointer to the VF structure
411 */
412static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
413{
414 struct ice_vf_fdir *fdir = &vf->fdir;
415 enum ice_fltr_ptype flow;
416
417 if (!fdir->fdir_prof)
418 return;
419
420 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
421 ice_vc_fdir_free_prof(vf, flow);
422
423 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
424 fdir->fdir_prof = NULL;
425}
426
427/**
428 * ice_vc_fdir_parse_flow_fld - parse the virtchnl header's flow fields
429 * @proto_hdr: virtual channel protocol filter header
430 * @conf: FDIR configuration for each filter
431 * @fld: field type array
432 * @fld_cnt: field counter
433 *
434 * Parse the virtual channel filter header and store its fields in @fld
435 *
436 * Return: 0 on success, and other on error.
437 */
438static int
439ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
440 struct virtchnl_fdir_fltr_conf *conf,
441 enum ice_flow_field *fld, int *fld_cnt)
442{
443 struct virtchnl_proto_hdr hdr;
444 u32 i;
445
446 memcpy(&hdr, proto_hdr, sizeof(hdr));
447
448 for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
449 VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
450 if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
451 if (fdir_inset_map[i].mask &&
452 ((fdir_inset_map[i].mask & conf->inset_flag) !=
453 fdir_inset_map[i].flag))
454 continue;
455
456 fld[*fld_cnt] = fdir_inset_map[i].fld;
457 *fld_cnt += 1;
458 if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
459 return -EINVAL;
460 VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
461 fdir_inset_map[i].field);
462 }
463
464 return 0;
465}
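
/* Example (hypothetical request): a proto_hdr whose field_selector has
 * the IPV4 SRC and DST bits set contributes ICE_FLOW_FIELD_IDX_IPV4_SA
 * and ICE_FLOW_FIELD_IDX_IPV4_DA to @fld, as mapped by fdir_inset_map.
 */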
466
467/**
468 * ice_vc_fdir_set_flow_fld - set the flow's input set fields
469 * @vf: pointer to the VF structure
470 * @fltr: virtual channel add cmd buffer
471 * @conf: FDIR configuration for each filter
472 * @seg: array of one or more packet segments that describe the flow
473 *
474 * Parse the virtual channel add msg buffer's field vector and store the
475 * fields in the flow's packet segment
476 *
477 * Return: 0 on success, and other on error.
478 */
479static int
480ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
481 struct virtchnl_fdir_fltr_conf *conf,
482 struct ice_flow_seg_info *seg)
483{
484 struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
485 enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
486 struct device *dev = ice_pf_to_dev(vf->pf);
487 struct virtchnl_proto_hdrs *proto;
488 int fld_cnt = 0;
489 int i;
490
491 proto = &rule->proto_hdrs;
492 for (i = 0; i < proto->count; i++) {
493 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
494 int ret;
495
496 ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
497 if (ret)
498 return ret;
499 }
500
501 if (fld_cnt == 0) {
502 dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
503 return -EINVAL;
504 }
505
506 for (i = 0; i < fld_cnt; i++)
507 ice_flow_set_fld(seg, fld[i],
508 ICE_FLOW_FLD_OFF_INVAL,
509 ICE_FLOW_FLD_OFF_INVAL,
510 ICE_FLOW_FLD_OFF_INVAL, false);
511
512 return 0;
513}
514
515/**
516 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
517 * @vf: pointer to the VF structure
518 * @conf: FDIR configuration for each filter
519 * @seg: array of one or more packet segments that describe the flow
520 *
521 * Return: 0 on success, and other on error.
522 */
523static int
524ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
525 struct virtchnl_fdir_fltr_conf *conf,
526 struct ice_flow_seg_info *seg)
527{
528 enum ice_fltr_ptype flow = conf->input.flow_type;
529 enum ice_fdir_tunnel_type ttype = conf->ttype;
530 struct device *dev = ice_pf_to_dev(vf->pf);
531
532 switch (flow) {
533 case ICE_FLTR_PTYPE_NON_IP_L2:
534 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
535 break;
536 case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
537 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
538 ICE_FLOW_SEG_HDR_IPV4 |
539 ICE_FLOW_SEG_HDR_IPV_OTHER);
540 break;
541 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
542 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
543 ICE_FLOW_SEG_HDR_IPV4 |
544 ICE_FLOW_SEG_HDR_IPV_OTHER);
545 break;
546 case ICE_FLTR_PTYPE_NONF_IPV4_AH:
547 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
548 ICE_FLOW_SEG_HDR_IPV4 |
549 ICE_FLOW_SEG_HDR_IPV_OTHER);
550 break;
551 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
552 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
553 ICE_FLOW_SEG_HDR_IPV4 |
554 ICE_FLOW_SEG_HDR_IPV_OTHER);
555 break;
556 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
557 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
558 ICE_FLOW_SEG_HDR_IPV4 |
559 ICE_FLOW_SEG_HDR_IPV_OTHER);
560 break;
561 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
562 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
563 ICE_FLOW_SEG_HDR_IPV4 |
564 ICE_FLOW_SEG_HDR_IPV_OTHER);
565 break;
566 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
567 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
568 ICE_FLOW_SEG_HDR_IPV_OTHER);
569 break;
570 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
571 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
572 ICE_FLOW_SEG_HDR_IPV4 |
573 ICE_FLOW_SEG_HDR_IPV_OTHER);
574 break;
575 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
576 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
577 ICE_FLOW_SEG_HDR_IPV4 |
578 ICE_FLOW_SEG_HDR_IPV_OTHER);
579 break;
580 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
581 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
582 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
583 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
584 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
585 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
586 ICE_FLOW_SEG_HDR_IPV4 |
587 ICE_FLOW_SEG_HDR_IPV_OTHER);
588 } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
589 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
590 ICE_FLOW_SEG_HDR_GTPU_IP |
591 ICE_FLOW_SEG_HDR_IPV4 |
592 ICE_FLOW_SEG_HDR_IPV_OTHER);
593 } else {
594			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
595				ttype, vf->vf_id);
596 return -EINVAL;
597 }
598 break;
599 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
600 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
601 ICE_FLOW_SEG_HDR_IPV4 |
602 ICE_FLOW_SEG_HDR_IPV_OTHER);
603 break;
604 case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
605 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
606 ICE_FLOW_SEG_HDR_IPV6 |
607 ICE_FLOW_SEG_HDR_IPV_OTHER);
608 break;
609 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
610 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
611 ICE_FLOW_SEG_HDR_IPV6 |
612 ICE_FLOW_SEG_HDR_IPV_OTHER);
613 break;
614 case ICE_FLTR_PTYPE_NONF_IPV6_AH:
615 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
616 ICE_FLOW_SEG_HDR_IPV6 |
617 ICE_FLOW_SEG_HDR_IPV_OTHER);
618 break;
619 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
620 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
621 ICE_FLOW_SEG_HDR_IPV6 |
622 ICE_FLOW_SEG_HDR_IPV_OTHER);
623 break;
624 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
625 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
626 ICE_FLOW_SEG_HDR_IPV6 |
627 ICE_FLOW_SEG_HDR_IPV_OTHER);
628 break;
629 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
630 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
631 ICE_FLOW_SEG_HDR_IPV6 |
632 ICE_FLOW_SEG_HDR_IPV_OTHER);
633 break;
634 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
635 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
636 ICE_FLOW_SEG_HDR_IPV_OTHER);
637 break;
638 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
639 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
640 ICE_FLOW_SEG_HDR_IPV6 |
641 ICE_FLOW_SEG_HDR_IPV_OTHER);
642 break;
643 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
644 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
645 ICE_FLOW_SEG_HDR_IPV6 |
646 ICE_FLOW_SEG_HDR_IPV_OTHER);
647 break;
648 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
649 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
650 ICE_FLOW_SEG_HDR_IPV6 |
651 ICE_FLOW_SEG_HDR_IPV_OTHER);
652 break;
653 default:
654		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
655 flow, vf->vf_id);
656 return -EINVAL;
657 }
658
659 return 0;
660}
661
662/**
663 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
664 * @vf: pointer to the VF structure
665 * @flow: filter flow type
666 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
667 */
668static void
669ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
670{
671 struct ice_vf_fdir *fdir = &vf->fdir;
672 struct ice_fd_hw_prof *vf_prof;
673 struct ice_pf *pf = vf->pf;
674 struct ice_vsi *vf_vsi;
675 struct device *dev;
676 struct ice_hw *hw;
677 u64 prof_id;
678 int i;
679
680 dev = ice_pf_to_dev(pf);
681 hw = &pf->hw;
682 if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
683 return;
684
685 vf_prof = fdir->fdir_prof[flow];
686
687 vf_vsi = pf->vsi[vf->lan_vsi_idx];
688 if (!vf_vsi) {
689 dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
690 return;
691 }
692
693 if (!fdir->prof_entry_cnt[flow][tun])
694 return;
695
696 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
697 flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
698
699 for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
700 if (vf_prof->entry_h[i][tun]) {
701 u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
702
703 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
704 ice_flow_rem_entry(hw, ICE_BLK_FD,
705 vf_prof->entry_h[i][tun]);
706 vf_prof->entry_h[i][tun] = 0;
707 }
708
709 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
710 devm_kfree(dev, vf_prof->fdir_seg[tun]);
711 vf_prof->fdir_seg[tun] = NULL;
712
713 for (i = 0; i < vf_prof->cnt; i++)
714 vf_prof->vsi_h[i] = 0;
715
716 fdir->prof_entry_cnt[flow][tun] = 0;
717}
718
719/**
720 * ice_vc_fdir_rem_prof_all - remove profile for this VF
721 * @vf: pointer to the VF structure
722 */
723static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
724{
725 enum ice_fltr_ptype flow;
726
727 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
728 flow < ICE_FLTR_PTYPE_MAX; flow++) {
729 ice_vc_fdir_rem_prof(vf, flow, 0);
730 ice_vc_fdir_rem_prof(vf, flow, 1);
731 }
732}
733
734/**
735 * ice_vc_fdir_write_flow_prof - write a flow profile into hardware
736 * @vf: pointer to the VF structure
737 * @flow: filter flow type
738 * @seg: array of one or more packet segments that describe the flow
739 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
740 *
741 * Write the flow's profile config and packet segment into the hardware
742 *
743 * Return: 0 on success, and other on error.
744 */
745static int
746ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
747 struct ice_flow_seg_info *seg, int tun)
748{
749 struct ice_vf_fdir *fdir = &vf->fdir;
750 struct ice_vsi *vf_vsi, *ctrl_vsi;
751 struct ice_flow_seg_info *old_seg;
752 struct ice_flow_prof *prof = NULL;
753 struct ice_fd_hw_prof *vf_prof;
754 enum ice_status status;
755 struct device *dev;
756 struct ice_pf *pf;
757 struct ice_hw *hw;
758 u64 entry1_h = 0;
759 u64 entry2_h = 0;
760 u64 prof_id;
761 int ret;
762
763 pf = vf->pf;
764 dev = ice_pf_to_dev(pf);
765 hw = &pf->hw;
766 vf_vsi = pf->vsi[vf->lan_vsi_idx];
767 if (!vf_vsi)
768 return -EINVAL;
769
770 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
771 if (!ctrl_vsi)
772 return -EINVAL;
773
774 vf_prof = fdir->fdir_prof[flow];
775 old_seg = vf_prof->fdir_seg[tun];
776 if (old_seg) {
777 if (!memcmp(old_seg, seg, sizeof(*seg))) {
778 dev_dbg(dev, "Duplicated profile for VF %d!\n",
779 vf->vf_id);
780 return -EEXIST;
781 }
782
783 if (fdir->fdir_fltr_cnt[flow][tun]) {
784 ret = -EINVAL;
785 dev_dbg(dev, "Input set conflicts for VF %d\n",
786 vf->vf_id);
787 goto err_exit;
788 }
789
790 /* remove previously allocated profile */
791 ice_vc_fdir_rem_prof(vf, flow, tun);
792 }
793
794 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
795 tun ? ICE_FLTR_PTYPE_MAX : 0);
796
797 status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
798 tun + 1, &prof);
799 ret = ice_status_to_errno(status);
800 if (ret) {
801 dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
802 flow, vf->vf_id);
803 goto err_exit;
804 }
805
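	/* First flow entry: associate the FDIR profile with the VF's own
	 * data VSI.
	 */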
806 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
807 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
808 seg, &entry1_h);
809 ret = ice_status_to_errno(status);
810 if (ret) {
811 dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
812 flow, vf->vf_id);
813 goto err_prof;
814 }
815
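	/* Second flow entry: associate the same profile with the VF's
	 * control VSI, which is used for programming the filter rules.
	 */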
816 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
817 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
818 seg, &entry2_h);
819 ret = ice_status_to_errno(status);
820 if (ret) {
821 dev_dbg(dev,
822 "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
823 flow, vf->vf_id);
824 goto err_entry_1;
825 }
826
827 vf_prof->fdir_seg[tun] = seg;
828 vf_prof->cnt = 0;
829 fdir->prof_entry_cnt[flow][tun] = 0;
830
831 vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
832 vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
833 vf_prof->cnt++;
834 fdir->prof_entry_cnt[flow][tun]++;
835
836 vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
837 vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
838 vf_prof->cnt++;
839 fdir->prof_entry_cnt[flow][tun]++;
840
841 return 0;
842
843err_entry_1:
844 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
845 ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
846 ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
847err_prof:
848 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
849err_exit:
850 return ret;
851}
852
853/**
854 * ice_vc_fdir_config_input_set - config the filter's input set
855 * @vf: pointer to the VF structure
856 * @fltr: virtual channel add cmd buffer
857 * @conf: FDIR configuration for each filter
858 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
859 *
860 * Configure the input set type and value from the virtchnl add msg buffer
861 *
862 * Return: 0 on success, and other on error.
863 */
864static int
865ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
866 struct virtchnl_fdir_fltr_conf *conf, int tun)
867{
868 struct ice_fdir_fltr *input = &conf->input;
869 struct device *dev = ice_pf_to_dev(vf->pf);
870 struct ice_flow_seg_info *seg;
871 enum ice_fltr_ptype flow;
872 int ret;
873
874 flow = input->flow_type;
875 ret = ice_vc_fdir_alloc_prof(vf, flow);
876 if (ret) {
877 dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
878 return ret;
879 }
880
881 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
882 if (!seg)
883 return -ENOMEM;
884
885 ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
886 if (ret) {
887 dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
888 goto err_exit;
889 }
890
891 ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
892 if (ret) {
893 dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
894 goto err_exit;
895 }
896
897 ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
898 if (ret == -EEXIST) {
899 devm_kfree(dev, seg);
900 } else if (ret) {
901 dev_dbg(dev, "Write flow profile for VF %d failed\n",
902 vf->vf_id);
903 goto err_exit;
904 }
905
906 return 0;
907
908err_exit:
909 devm_kfree(dev, seg);
910 return ret;
911}
912
913/**
914 * ice_vc_fdir_match_pattern - check if a filter matches a pattern
915 * @fltr: virtual channel add cmd buffer
916 * @type: virtual channel protocol header type list, NONE-terminated
917 *
918 * Match the header types by walking the filter's header list against @type.
919 *
920 * Return: true if the filter matches the pattern, false otherwise.
921 */
922static bool
923ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
924 enum virtchnl_proto_hdr_type *type)
925{
926 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
927 int i = 0;
928
929 while ((i < proto->count) &&
930 (*type == proto->proto_hdr[i].type) &&
931 (*type != VIRTCHNL_PROTO_HDR_NONE)) {
932 type++;
933 i++;
934 }
935
936 return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
937}
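
/* Illustration (hypothetical request): a filter whose proto_hdrs hold
 * { ETH, IPV4, UDP } with count == 3 matches vc_pattern_ipv4_udp, since
 * the walk ends exactly at the pattern's VIRTCHNL_PROTO_HDR_NONE
 * terminator.
 */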
938
939/**
940 * ice_vc_fdir_get_pattern - get the allowed (whitelisted) pattern list
941 * @vf: pointer to the VF info
942 * @len: used to return the pattern list length
943 *
944 * Return: pointer to the allowed filter pattern list
945 */
946static const struct virtchnl_fdir_pattern_match_item *
947ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
948{
949 const struct virtchnl_fdir_pattern_match_item *item;
950 struct ice_pf *pf = vf->pf;
951 struct ice_hw *hw;
952
953 hw = &pf->hw;
954 if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
955 sizeof(hw->active_pkg_name))) {
956 item = vc_fdir_pattern_comms;
957 *len = ARRAY_SIZE(vc_fdir_pattern_comms);
958 } else {
959 item = vc_fdir_pattern_os;
960 *len = ARRAY_SIZE(vc_fdir_pattern_os);
961 }
962
963 return item;
964}
965
966/**
967 * ice_vc_fdir_search_pattern - search for a matching pattern
968 * @vf: pointer to the VF info
969 * @fltr: virtual channel add cmd buffer
970 *
971 * Search for matched pattern from supported pattern list
972 *
973 * Return: 0 on success, and other on error.
974 */
975static int
976ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
977{
978 const struct virtchnl_fdir_pattern_match_item *pattern;
979 int len, i;
980
981 pattern = ice_vc_fdir_get_pattern(vf, &len);
982
983 for (i = 0; i < len; i++)
984 if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
985 return 0;
986
987 return -EINVAL;
988}
989
990/**
991 * ice_vc_fdir_parse_pattern - parse the virtchnl filter's pattern
992 * @vf: pointer to the VF info
993 * @fltr: virtual channel add cmd buffer
994 * @conf: FDIR configuration for each filter
995 *
996 * Parse the virtual channel filter's pattern and store it in @conf
997 *
998 * Return: 0 on success, and other on error.
999 */
1000static int
1001ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1002 struct virtchnl_fdir_fltr_conf *conf)
1003{
1004 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1005 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
1006 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
1007 struct device *dev = ice_pf_to_dev(vf->pf);
1008 struct ice_fdir_fltr *input = &conf->input;
1009 int i;
1010
1011 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
1012 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
1013 proto->count, vf->vf_id);
1014 return -EINVAL;
1015 }
1016
1017 for (i = 0; i < proto->count; i++) {
1018 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
1019 struct ip_esp_hdr *esph;
1020 struct ip_auth_hdr *ah;
1021 struct sctphdr *sctph;
1022 struct ipv6hdr *ip6h;
1023 struct udphdr *udph;
1024 struct tcphdr *tcph;
1025 struct ethhdr *eth;
1026 struct iphdr *iph;
1027 u8 s_field;
1028 u8 *rawh;
1029
1030 switch (hdr->type) {
1031 case VIRTCHNL_PROTO_HDR_ETH:
1032 eth = (struct ethhdr *)hdr->buffer;
1033 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1034
1035 if (hdr->field_selector)
1036 input->ext_data.ether_type = eth->h_proto;
1037 break;
1038 case VIRTCHNL_PROTO_HDR_IPV4:
1039 iph = (struct iphdr *)hdr->buffer;
1040 l3 = VIRTCHNL_PROTO_HDR_IPV4;
1041 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1042
1043 if (hdr->field_selector) {
1044 input->ip.v4.src_ip = iph->saddr;
1045 input->ip.v4.dst_ip = iph->daddr;
1046 input->ip.v4.tos = iph->tos;
1047 input->ip.v4.proto = iph->protocol;
1048 }
1049 break;
1050 case VIRTCHNL_PROTO_HDR_IPV6:
1051 ip6h = (struct ipv6hdr *)hdr->buffer;
1052 l3 = VIRTCHNL_PROTO_HDR_IPV6;
1053 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1054
1055 if (hdr->field_selector) {
1056 memcpy(input->ip.v6.src_ip,
1057 ip6h->saddr.in6_u.u6_addr8,
1058 sizeof(ip6h->saddr));
1059 memcpy(input->ip.v6.dst_ip,
1060 ip6h->daddr.in6_u.u6_addr8,
1061 sizeof(ip6h->daddr));
1062 input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
1063 (ip6h->flow_lbl[0] >> 4);
1064 input->ip.v6.proto = ip6h->nexthdr;
1065 }
1066 break;
1067 case VIRTCHNL_PROTO_HDR_TCP:
1068 tcph = (struct tcphdr *)hdr->buffer;
1069 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1070 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1071 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1072 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1073
1074 if (hdr->field_selector) {
1075 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1076 input->ip.v4.src_port = tcph->source;
1077 input->ip.v4.dst_port = tcph->dest;
1078 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1079 input->ip.v6.src_port = tcph->source;
1080 input->ip.v6.dst_port = tcph->dest;
1081 }
1082 }
1083 break;
1084 case VIRTCHNL_PROTO_HDR_UDP:
1085 udph = (struct udphdr *)hdr->buffer;
1086 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1087 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1088 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1089 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1090
1091 if (hdr->field_selector) {
1092 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1093 input->ip.v4.src_port = udph->source;
1094 input->ip.v4.dst_port = udph->dest;
1095 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1096 input->ip.v6.src_port = udph->source;
1097 input->ip.v6.dst_port = udph->dest;
1098 }
1099 }
1100 break;
1101 case VIRTCHNL_PROTO_HDR_SCTP:
1102 sctph = (struct sctphdr *)hdr->buffer;
1103 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1104 input->flow_type =
1105 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1106 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1107 input->flow_type =
1108 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1109
1110 if (hdr->field_selector) {
1111 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1112 input->ip.v4.src_port = sctph->source;
1113 input->ip.v4.dst_port = sctph->dest;
1114 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1115 input->ip.v6.src_port = sctph->source;
1116 input->ip.v6.dst_port = sctph->dest;
1117 }
1118 }
1119 break;
1120 case VIRTCHNL_PROTO_HDR_L2TPV3:
1121 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1122 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
1123 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1124 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
1125
1126 if (hdr->field_selector)
1127 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
1128 break;
1129 case VIRTCHNL_PROTO_HDR_ESP:
1130 esph = (struct ip_esp_hdr *)hdr->buffer;
1131 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1132 l4 == VIRTCHNL_PROTO_HDR_UDP)
1133 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
1134 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1135 l4 == VIRTCHNL_PROTO_HDR_UDP)
1136 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
1137 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1138 l4 == VIRTCHNL_PROTO_HDR_NONE)
1139 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
1140 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1141 l4 == VIRTCHNL_PROTO_HDR_NONE)
1142 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
1143
1144 if (l4 == VIRTCHNL_PROTO_HDR_UDP)
1145 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
1146 else
1147 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
1148
1149 if (hdr->field_selector) {
1150 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1151 input->ip.v4.sec_parm_idx = esph->spi;
1152 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1153 input->ip.v6.sec_parm_idx = esph->spi;
1154 }
1155 break;
1156 case VIRTCHNL_PROTO_HDR_AH:
1157 ah = (struct ip_auth_hdr *)hdr->buffer;
1158 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1159 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
1160 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1161 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
1162
1163 if (hdr->field_selector) {
1164 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1165 input->ip.v4.sec_parm_idx = ah->spi;
1166 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1167 input->ip.v6.sec_parm_idx = ah->spi;
1168 }
1169 break;
1170 case VIRTCHNL_PROTO_HDR_PFCP:
1171 rawh = (u8 *)hdr->buffer;
1172 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
1173 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
1174 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
1175 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
1176 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
1177 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
1178 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
1179 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
1180 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
1181
1182 if (hdr->field_selector) {
1183 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1184 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
1185 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1186 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
1187 }
1188 break;
1189 case VIRTCHNL_PROTO_HDR_GTPU_IP:
1190 rawh = (u8 *)hdr->buffer;
1191 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1192
1193 if (hdr->field_selector)
1194 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
1195 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
1196 break;
1197 case VIRTCHNL_PROTO_HDR_GTPU_EH:
1198 rawh = (u8 *)hdr->buffer;
1199
1200 if (hdr->field_selector)
1201 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1202 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1203 break;
1204 default:
1205			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
1206 hdr->type, vf->vf_id);
1207 return -EINVAL;
1208 }
1209 }
1210
1211 return 0;
1212}
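
/* Example (hypothetical GTP-U filter): for proto_hdrs
 * { ETH, IPV4, UDP, GTPU_IP }, the loop above leaves flow_type at
 * ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER with conf->ttype set to
 * ICE_FDIR_TUNNEL_TYPE_GTPU; a trailing GTPU_EH header upgrades ttype
 * to ICE_FDIR_TUNNEL_TYPE_GTPU_EH.
 */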
1213
1214/**
1215 * ice_vc_fdir_parse_action - parse the virtchnl filter's actions
1216 * @vf: pointer to the VF info
1217 * @fltr: virtual channel add cmd buffer
1218 * @conf: FDIR configuration for each filter
1219 *
1220 * Parse the virtual channel filter's actions and store them in @conf
1221 *
1222 * Return: 0 on success, and other on error.
1223 */
1224static int
1225ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1226 struct virtchnl_fdir_fltr_conf *conf)
1227{
1228 struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1229 struct device *dev = ice_pf_to_dev(vf->pf);
1230 struct ice_fdir_fltr *input = &conf->input;
1231 u32 dest_num = 0;
1232 u32 mark_num = 0;
1233 int i;
1234
1235 if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1236 dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1237 as->count, vf->vf_id);
1238 return -EINVAL;
1239 }
1240
1241 for (i = 0; i < as->count; i++) {
1242 struct virtchnl_filter_action *action = &as->actions[i];
1243
1244 switch (action->type) {
1245 case VIRTCHNL_ACTION_PASSTHRU:
1246 dest_num++;
1247 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1248 break;
1249 case VIRTCHNL_ACTION_DROP:
1250 dest_num++;
1251 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1252 break;
1253 case VIRTCHNL_ACTION_QUEUE:
1254 dest_num++;
1255 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1256 input->q_index = action->act_conf.queue.index;
1257 break;
1258 case VIRTCHNL_ACTION_Q_REGION:
1259 dest_num++;
1260 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1261 input->q_index = action->act_conf.queue.index;
1262 input->q_region = action->act_conf.queue.region;
1263 break;
1264 case VIRTCHNL_ACTION_MARK:
1265 mark_num++;
1266 input->fltr_id = action->act_conf.mark_id;
1267 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1268 break;
1269 default:
1270 dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1271 action->type, vf->vf_id);
1272 return -EINVAL;
1273 }
1274 }
1275
1276 if (dest_num == 0 || dest_num >= 2) {
1277 dev_dbg(dev, "Invalid destination action for VF %d\n",
1278 vf->vf_id);
1279 return -EINVAL;
1280 }
1281
1282 if (mark_num >= 2) {
1283 dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1284 return -EINVAL;
1285 }
1286
1287 return 0;
1288}
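
/* Example (hypothetical action set): exactly one destination action is
 * required, so a single VIRTCHNL_ACTION_QUEUE (say, to queue 3) is
 * valid, optionally combined with one VIRTCHNL_ACTION_MARK; two
 * destination actions (e.g. queue plus drop) are rejected.
 */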
1289
1290/**
1291 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1292 * @vf: pointer to the VF info
1293 * @fltr: virtual channel add cmd buffer
1294 * @conf: FDIR configuration for each filter
1295 *
1296 * Return: 0 on success, and other on error.
1297 */
1298static int
1299ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1300 struct virtchnl_fdir_fltr_conf *conf)
1301{
1302 int ret;
1303
1304 ret = ice_vc_fdir_search_pattern(vf, fltr);
1305 if (ret)
1306 return ret;
1307
1308 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1309 if (ret)
1310 return ret;
1311
1312 return ice_vc_fdir_parse_action(vf, fltr, conf);
1313}
1314
1315/**
1316 * ice_vc_fdir_comp_rules - check whether two filter rules are identical
1317 * @conf_a: FDIR configuration for filter a
1318 * @conf_b: FDIR configuration for filter b
1319 *
1320 * Return: true if the two rules are identical, false otherwise.
1321 */
1322static bool
1323ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1324 struct virtchnl_fdir_fltr_conf *conf_b)
1325{
1326 struct ice_fdir_fltr *a = &conf_a->input;
1327 struct ice_fdir_fltr *b = &conf_b->input;
1328
1329 if (conf_a->ttype != conf_b->ttype)
1330 return false;
1331 if (a->flow_type != b->flow_type)
1332 return false;
1333 if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1334 return false;
1335 if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1336 return false;
1337 if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1338 return false;
1339 if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1340 return false;
1341 if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1342 return false;
1343 if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1344 return false;
1345 if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1346 return false;
1347 if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1348 return false;
1349
1350 return true;
1351}
1352
1353/**
1354 * ice_vc_fdir_is_dup_fltr - check for a duplicate rule
1355 * @vf: pointer to the VF info
1356 * @conf: FDIR configuration for each filter
1357 *
1358 * Check if a rule with the same configuration value already exists
1359 *
1360 * Return: true if a duplicate rule exists, false otherwise.
1361 */
1362static bool
1363ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1364{
1365 struct ice_fdir_fltr *desc;
1366 bool ret;
1367
1368 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1369 struct virtchnl_fdir_fltr_conf *node =
1370 to_fltr_conf_from_desc(desc);
1371
1372 ret = ice_vc_fdir_comp_rules(node, conf);
1373 if (ret)
1374 return true;
1375 }
1376
1377 return false;
1378}
1379
1380/**
1381 * ice_vc_fdir_insert_entry - insert FDIR conf entry and allocate an ID
1382 * @vf: pointer to the VF info
1383 * @conf: FDIR configuration for each filter
1384 * @id: pointer to ID value allocated by driver
1385 *
1386 * Insert FDIR conf entry into list and allocate ID for this filter
1387 *
1388 * Return: 0 on success, and other on error.
1389 */
1390static int
1391ice_vc_fdir_insert_entry(struct ice_vf *vf,
1392 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1393{
1394 struct ice_fdir_fltr *input = &conf->input;
1395 int i;
1396
1397 /* alloc ID corresponding with conf */
1398 i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1399 ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1400 if (i < 0)
1401 return -EINVAL;
1402 *id = i;
1403
1404 list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1405 return 0;
1406}
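
/* The ID allocated by ice_vc_fdir_insert_entry() is reported to the VF
 * as the rule's flow_id in the virtchnl reply, and it is the handle the
 * VF passes back in a later VIRTCHNL_OP_DEL_FDIR_FILTER request (see
 * ice_vc_del_fdir_fltr()).
 */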
1407
1408/**
1409 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1410 * @vf: pointer to the VF info
1411 * @conf: FDIR configuration for each filter
1412 * @id: filter rule's ID
1413 */
1414static void
1415ice_vc_fdir_remove_entry(struct ice_vf *vf,
1416 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1417{
1418 struct ice_fdir_fltr *input = &conf->input;
1419
1420 idr_remove(&vf->fdir.fdir_rule_idr, id);
1421 list_del(&input->fltr_node);
1422}
1423
1424/**
1425 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1426 * @vf: pointer to the VF info
1427 * @id: filter rule's ID
1428 *
1429 * Return: pointer to the FDIR conf entry on success, NULL otherwise.
1430 */
1431static struct virtchnl_fdir_fltr_conf *
1432ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1433{
1434 return idr_find(&vf->fdir.fdir_rule_idr, id);
1435}
1436
1437/**
1438 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1439 * @vf: pointer to the VF info
1440 */
1441static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1442{
1443 struct virtchnl_fdir_fltr_conf *conf;
1444 struct ice_fdir_fltr *desc, *temp;
1445
1446 list_for_each_entry_safe(desc, temp,
1447 &vf->fdir.fdir_rule_list, fltr_node) {
1448 conf = to_fltr_conf_from_desc(desc);
1449 list_del(&desc->fltr_node);
1450 devm_kfree(ice_pf_to_dev(vf->pf), conf);
1451 }
1452}
1453
1454/**
1455 * ice_vc_fdir_write_fltr - write filter rule into hardware
1456 * @vf: pointer to the VF info
1457 * @conf: FDIR configuration for each filter
1458 * @add: true implies add rule, false implies del rule
1459 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1460 *
1461 * Return: 0 on success, and other on error.
1462 */
1463static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1464 struct virtchnl_fdir_fltr_conf *conf,
1465 bool add, bool is_tun)
1466{
1467 struct ice_fdir_fltr *input = &conf->input;
1468 struct ice_vsi *vsi, *ctrl_vsi;
1469 struct ice_fltr_desc desc;
1470 enum ice_status status;
1471 struct device *dev;
1472 struct ice_pf *pf;
1473 struct ice_hw *hw;
1474 int ret;
1475 u8 *pkt;
1476
1477 pf = vf->pf;
1478 dev = ice_pf_to_dev(pf);
1479 hw = &pf->hw;
1480 vsi = pf->vsi[vf->lan_vsi_idx];
1481 if (!vsi) {
1482 dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1483 return -EINVAL;
1484 }
1485
1486 input->dest_vsi = vsi->idx;
1487 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1488
1489 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1490 if (!ctrl_vsi) {
1491 dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1492 return -EINVAL;
1493 }
1494
1495 pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1496 if (!pkt)
1497 return -ENOMEM;
1498
1499 ice_fdir_get_prgm_desc(hw, input, &desc, add);
1500 status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1501 ret = ice_status_to_errno(status);
1502 if (ret) {
1503 dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1504 vf->vf_id, input->flow_type);
1505 goto err_free_pkt;
1506 }
1507
1508 ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1509 if (ret)
1510 goto err_free_pkt;
1511
1512 return 0;
1513
1514err_free_pkt:
1515 devm_kfree(dev, pkt);
1516 return ret;
1517}
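
/* Programming is asynchronous: the descriptor and training packet are
 * queued to the control VSI's TX ring via ice_prgm_fdir_fltr(), and the
 * hardware's programming status comes back on the control VSI's RX
 * queue, handled by ice_vc_fdir_irq_handler() below.
 */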
1518
1519/**
1520 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1521 * @t: pointer to timer_list
1522 */
1523static void ice_vf_fdir_timer(struct timer_list *t)
1524{
1525 struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1526 struct ice_vf_fdir_ctx *ctx_done;
1527 struct ice_vf_fdir *fdir;
1528 unsigned long flags;
1529 struct ice_vf *vf;
1530 struct ice_pf *pf;
1531
1532 fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1533 vf = container_of(fdir, struct ice_vf, fdir);
1534 ctx_done = &fdir->ctx_done;
1535 pf = vf->pf;
1536 spin_lock_irqsave(&fdir->ctx_lock, flags);
1537 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1538 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1539 WARN_ON_ONCE(1);
1540 return;
1541 }
1542
1543 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1544
1545 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1546 ctx_done->conf = ctx_irq->conf;
1547 ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1548 ctx_done->v_opcode = ctx_irq->v_opcode;
1549 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1550
1551 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1552 ice_service_task_schedule(pf);
1553}
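
/* Both this timer handler and ice_vc_fdir_irq_handler() below clear
 * ICE_VF_FDIR_CTX_VALID under ctx_lock before filling ctx_done, so
 * whichever fires first hands the context off and the other observes
 * the cleared flag and returns.
 */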
1554
1555/**
1556 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1557 * @ctrl_vsi: pointer to a VF's CTRL VSI
1558 * @rx_desc: pointer to FDIR Rx queue descriptor
1559 */
1560void
1561ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1562 union ice_32b_rx_flex_desc *rx_desc)
1563{
1564 struct ice_pf *pf = ctrl_vsi->back;
1565 struct ice_vf_fdir_ctx *ctx_done;
1566 struct ice_vf_fdir_ctx *ctx_irq;
1567 struct ice_vf_fdir *fdir;
1568 unsigned long flags;
1569 struct device *dev;
1570 struct ice_vf *vf;
1571 int ret;
1572
1573 vf = &pf->vf[ctrl_vsi->vf_id];
1574
1575 fdir = &vf->fdir;
1576 ctx_done = &fdir->ctx_done;
1577 ctx_irq = &fdir->ctx_irq;
1578 dev = ice_pf_to_dev(pf);
1579 spin_lock_irqsave(&fdir->ctx_lock, flags);
1580 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1581 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1582 WARN_ON_ONCE(1);
1583 return;
1584 }
1585
1586 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1587
1588 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1589 ctx_done->conf = ctx_irq->conf;
1590 ctx_done->stat = ICE_FDIR_CTX_IRQ;
1591 ctx_done->v_opcode = ctx_irq->v_opcode;
1592 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1593 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1594
1595 ret = del_timer(&ctx_irq->rx_tmr);
1596 if (!ret)
1597 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1598
1599 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1600 ice_service_task_schedule(pf);
1601}
1602
1603/**
1604 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1605 * @vf: pointer to the VF info
1606 */
1607static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1608{
1609 struct ice_vsi *vf_vsi;
1610 u32 fd_size, fd_cnt;
1611 struct device *dev;
1612 struct ice_pf *pf;
1613 struct ice_hw *hw;
1614 u16 vsi_num;
1615
1616 pf = vf->pf;
1617 hw = &pf->hw;
1618 dev = ice_pf_to_dev(pf);
1619 vf_vsi = pf->vsi[vf->lan_vsi_idx];
1620 vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1621
1622 fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1623 fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1624	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
1625 vf->vf_id,
1626 (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1627 (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
1628 (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1629 (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
1630}
1631
1632/**
1633 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1634 * @vf: pointer to the VF info
1635 * @ctx: FDIR context info for post processing
1636 * @status: virtchnl FDIR program status
1637 *
1638 * Return: 0 on success, and other on error.
1639 */
1640static int
1641ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1642 enum virtchnl_fdir_prgm_status *status)
1643{
1644 struct device *dev = ice_pf_to_dev(vf->pf);
1645 u32 stat_err, error, prog_id;
1646 int ret;
1647
1648 stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1649 if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
1650 ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
1651 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1652 dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1653 ret = -EINVAL;
1654 goto err_exit;
1655 }
1656
1657 prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
1658 ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
1659 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1660 ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1661		dev_err(dev, "VF %d: descriptor reports add, but context does not\n",
1662			vf->vf_id);
1663 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1664 ret = -EINVAL;
1665 goto err_exit;
1666 }
1667
1668 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1669 ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1670		dev_err(dev, "VF %d: descriptor reports del, but context does not\n",
1671			vf->vf_id);
1672 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1673 ret = -EINVAL;
1674 goto err_exit;
1675 }
1676
1677 error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
1678 ICE_FXD_FLTR_WB_QW1_FAIL_S;
1679 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1680 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1681			dev_err(dev, "VF %d: failed to add FDIR rule, no space left in the table\n",
1682 vf->vf_id);
1683 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1684 } else {
1685			dev_err(dev, "VF %d: failed to remove FDIR rule, tried to remove a non-existent entry\n",
1686 vf->vf_id);
1687 *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1688 }
1689 ret = -EINVAL;
1690 goto err_exit;
1691 }
1692
1693 error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
1694 ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
1695 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1696		dev_err(dev, "VF %d: profile matching error\n", vf->vf_id);
1697 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1698 ret = -EINVAL;
1699 goto err_exit;
1700 }
1701
1702 *status = VIRTCHNL_FDIR_SUCCESS;
1703
1704 return 0;
1705
1706err_exit:
1707 ice_vf_fdir_dump_info(vf);
1708 return ret;
1709}
1710
1711/**
1712 * ice_vc_add_fdir_fltr_post - post process for adding an FDIR filter
1713 * @vf: pointer to the VF structure
1714 * @ctx: FDIR context info for post processing
1715 * @status: virtchnl FDIR program status
1716 * @success: true implies success, false implies failure
1717 *
1718 * Post process for the flow director add command. On success, do the post
1719 * processing and send back a success msg by virtchnl. Otherwise, revert the
1720 * context and send back a failure msg by virtchnl.
1721 *
1722 * Return: 0 on success, and other on error.
1723 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

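	/* Success: echo the flow_id assigned at insert time back to the VF
	 * and account for the new filter in the per-flow-type count.
	 */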
	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post - post process for FDIR filter delete
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director delete command: on success, remove the
 * filter entry and send a success message back over virtchnl; on failure,
 * revert the context and send a failure message back over virtchnl.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx - flush pending FDIR context events
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	int i;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf *vf = &pf->vf[i];
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

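		/* Completions (and timeouts) are published to ctx_done by
		 * the ctrl VSI interrupt/timer path; test the VALID flag
		 * under ctx_lock before consuming the context.
		 */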
		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

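	/* Arm a 10 ms one-shot timer as a watchdog for the programming
	 * status writeback; on expiry the context is marked
	 * ICE_FDIR_CTX_TIMEOUT and ice_flush_fdir_ctx() reports
	 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT to the VF.
	 */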
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code on failure.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_free_conf;
	}

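	/* The IRQ context was armed above, before the filter is written,
	 * so the programming status interrupt always finds a valid
	 * context; a write failure unwinds both the entry and the
	 * context below.
	 */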
	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_rem_entry;
	}

exit:
	kfree(stat);
	return ret;

err_rem_entry:
	ice_vc_fdir_clear_irq_ctx(vf);
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

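	/* Deletion reuses the programming path with add == false; the
	 * bookkeeping entry is only removed in ice_vc_del_fdir_fltr_post()
	 * once the status writeback confirms the delete.
	 */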
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}