/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#include "ice_flex_type.h"
#include "ice_parser.h"

#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4 \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6 \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID 0
#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)

#define ICE_FLOW_HASH_GTP_C_TEID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)

#define ICE_FLOW_HASH_GTP_U_TEID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

#define ICE_FLOW_HASH_GTP_U_EH_TEID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
 ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
 ICE_FLOW_HASH_GTP_U_EH_QFI)

#define ICE_FLOW_HASH_GTP_U_UP \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
#define ICE_FLOW_HASH_GTP_U_DWN \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)

#define ICE_FLOW_HASH_PPPOE_SESS_ID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
 (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
 (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
 (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)

#define ICE_FLOW_HASH_PFCP_SEID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)

#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)

#define ICE_FLOW_HASH_ESP_SPI \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)

#define ICE_FLOW_HASH_AH_SPI \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)

#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
 (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

/* Protocol header fields within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group. Each logical group of
 * protocol headers either encapsulates or is encapsulated by another group via
 * tunneling or encapsulation protocols used for network virtualization, such
 * as GRE and VxLAN.
 */
enum ice_flow_seg_hdr {
 ICE_FLOW_SEG_HDR_NONE = 0x00000000,
 ICE_FLOW_SEG_HDR_ETH = 0x00000001,
 ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
 ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
 ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
 ICE_FLOW_SEG_HDR_ARP = 0x00000010,
 ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
 ICE_FLOW_SEG_HDR_TCP = 0x00000040,
 ICE_FLOW_SEG_HDR_UDP = 0x00000080,
 ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
 ICE_FLOW_SEG_HDR_GRE = 0x00000200,
 ICE_FLOW_SEG_HDR_GTPC = 0x00000400,
 ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
 ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
 ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
 ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
 ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
 ICE_FLOW_SEG_HDR_PPPOE = 0x00010000,
 ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000,
 ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000,
 ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000,
 ICE_FLOW_SEG_HDR_ESP = 0x00100000,
 ICE_FLOW_SEG_HDR_AH = 0x00200000,
 ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
 ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
 /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
  * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
  */
 ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};

/* These segments all have the same PTYPES, but are otherwise distinguished by
 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
 *
 *                           gtp_eh_pdu  gtp_eh_pdu_link
 * ICE_FLOW_SEG_HDR_GTPU_IP      0             0
 * ICE_FLOW_SEG_HDR_GTPU_EH      1          don't care
 * ICE_FLOW_SEG_HDR_GTPU_DWN     1             0
 * ICE_FLOW_SEG_HDR_GTPU_UP      1             1
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
 ICE_FLOW_SEG_HDR_GTPU_EH | \
 ICE_FLOW_SEG_HDR_GTPU_DWN | \
 ICE_FLOW_SEG_HDR_GTPU_UP)
#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
 ICE_FLOW_SEG_HDR_PFCP_SESSION)

enum ice_flow_field {
 /* L2 */
 ICE_FLOW_FIELD_IDX_ETH_DA,
 ICE_FLOW_FIELD_IDX_ETH_SA,
 ICE_FLOW_FIELD_IDX_S_VLAN,
 ICE_FLOW_FIELD_IDX_C_VLAN,
 ICE_FLOW_FIELD_IDX_ETH_TYPE,
 /* L3 */
 ICE_FLOW_FIELD_IDX_IPV4_DSCP,
 ICE_FLOW_FIELD_IDX_IPV6_DSCP,
 ICE_FLOW_FIELD_IDX_IPV4_TTL,
 ICE_FLOW_FIELD_IDX_IPV4_PROT,
 ICE_FLOW_FIELD_IDX_IPV6_TTL,
 ICE_FLOW_FIELD_IDX_IPV6_PROT,
 ICE_FLOW_FIELD_IDX_IPV4_SA,
 ICE_FLOW_FIELD_IDX_IPV4_DA,
 ICE_FLOW_FIELD_IDX_IPV6_SA,
 ICE_FLOW_FIELD_IDX_IPV6_DA,
 /* L4 */
 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
 ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
 ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
 ICE_FLOW_FIELD_IDX_TCP_FLAGS,
 /* ARP */
 ICE_FLOW_FIELD_IDX_ARP_SIP,
 ICE_FLOW_FIELD_IDX_ARP_DIP,
 ICE_FLOW_FIELD_IDX_ARP_SHA,
 ICE_FLOW_FIELD_IDX_ARP_DHA,
 ICE_FLOW_FIELD_IDX_ARP_OP,
 /* ICMP */
 ICE_FLOW_FIELD_IDX_ICMP_TYPE,
 ICE_FLOW_FIELD_IDX_ICMP_CODE,
 /* GRE */
 ICE_FLOW_FIELD_IDX_GRE_KEYID,
 /* GTPC_TEID */
 ICE_FLOW_FIELD_IDX_GTPC_TEID,
 /* GTPU_IP */
 ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
 /* GTPU_EH */
 ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
 ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
 /* GTPU_UP */
 ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
 /* GTPU_DWN */
 ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
 /* PPPoE */
 ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
 /* PFCP */
 ICE_FLOW_FIELD_IDX_PFCP_SEID,
 /* L2TPv3 */
 ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
 /* ESP */
 ICE_FLOW_FIELD_IDX_ESP_SPI,
 /* AH */
 ICE_FLOW_FIELD_IDX_AH_SPI,
 /* NAT_T ESP */
 ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
 /* The total number of enums must not exceed 64 */
 ICE_FLOW_FIELD_IDX_MAX
};

#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)

#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
 BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
 /* Values 0 - 28 are reserved for future use */
 ICE_AVF_FLOW_FIELD_INVALID = 0,
 ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
 ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
 ICE_AVF_FLOW_FIELD_IPV4_UDP,
 ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
 ICE_AVF_FLOW_FIELD_IPV4_TCP,
 ICE_AVF_FLOW_FIELD_IPV4_SCTP,
 ICE_AVF_FLOW_FIELD_IPV4_OTHER,
 ICE_AVF_FLOW_FIELD_FRAG_IPV4,
 /* Values 37-38 are reserved */
 ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
 ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
 ICE_AVF_FLOW_FIELD_IPV6_UDP,
 ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
 ICE_AVF_FLOW_FIELD_IPV6_TCP,
 ICE_AVF_FLOW_FIELD_IPV6_SCTP,
 ICE_AVF_FLOW_FIELD_IPV6_OTHER,
 ICE_AVF_FLOW_FIELD_FRAG_IPV6,
 ICE_AVF_FLOW_FIELD_RSVD47,
 ICE_AVF_FLOW_FIELD_FCOE_OX,
 ICE_AVF_FLOW_FIELD_FCOE_RX,
 ICE_AVF_FLOW_FIELD_FCOE_OTHER,
 /* Values 51-62 are reserved */
 ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
 ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
#define ICE_DEFAULT_RSS_HENA ( \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
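
/* Illustrative sketch, not part of the original header: enabling the default
 * RSS hash set on a VF's VSI could use the AVF helper declared later in this
 * file, assuming hw and vsi are the caller's ice_hw and ice_vsi pointers:
 *
 *   int err = ice_add_avf_rss_cfg(hw, vsi, ICE_DEFAULT_RSS_HENA);
 */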

enum ice_rss_cfg_hdr_type {
 ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
 ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
 /* take inner headers as inputset for packet with outer ipv4. */
 ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
 /* take inner headers as inputset for packet with outer ipv6. */
 ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
 /* take outer headers first then inner headers as inputset */
 ICE_RSS_ANY_HEADERS
};

struct ice_vsi;
struct ice_rss_hash_cfg {
 u32 addl_hdrs; /* protocol header fields */
 u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
 enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
 bool symm; /* symmetric or asymmetric hash */
};
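
/* Illustrative sketch, not part of the original header: requesting symmetric
 * RSS over the inner IPv4 addresses and UDP ports of tunneled traffic could
 * fill the structure above and hand it to ice_add_rss_cfg(), assuming hw and
 * vsi are the caller's ice_hw and ice_vsi pointers:
 *
 *   struct ice_rss_hash_cfg cfg = {
 *           .addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
 *           .hash_flds = ICE_HASH_UDP_IPV4,
 *           .hdr_type = ICE_RSS_INNER_HEADERS,
 *           .symm = true,
 *   };
 *   int err = ice_add_rss_cfg(hw, vsi, &cfg);
 */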

enum ice_flow_dir {
 ICE_FLOW_RX = 0x02,
};

enum ice_flow_priority {
 ICE_FLOW_PRIO_LOW,
 ICE_FLOW_PRIO_NORMAL,
 ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2

#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))

struct ice_flow_seg_xtrct {
 u8 prot_id; /* Protocol ID of extracted header field */
 u16 off; /* Starting offset of the field in header in bytes */
 u8 idx; /* Index of FV entry used */
 u8 disp; /* Displacement of field in bits from FV entry's start */
 u16 mask; /* Mask for field */
};

enum ice_flow_fld_match_type {
 ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
 ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
 ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
 ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
};

struct ice_flow_fld_loc {
 /* Describe offsets of field information relative to the beginning of the
  * input buffer provided when adding flow entries.
  */
 u16 val; /* Offset where the value is located */
 u16 mask; /* Offset where the mask/prefix value is located */
 u16 last; /* Length or offset where the upper value is located */
};

struct ice_flow_fld_info {
 enum ice_flow_fld_match_type type;
 /* Location where to retrieve data from an input buffer */
 struct ice_flow_fld_loc src;
 /* Location where to put the data into the final entry buffer */
 struct ice_flow_fld_loc entry;
 struct ice_flow_seg_xtrct xtrct;
};

struct ice_flow_seg_fld_raw {
 struct ice_flow_fld_info info;
 u16 off; /* Offset from the start of the segment */
};

struct ice_flow_seg_info {
 u32 hdrs; /* Bitmask indicating protocol headers present */
 u64 match; /* Bitmask indicating header fields to be matched */
 u64 range; /* Bitmask indicating header fields matched as ranges */

 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

 u8 raws_cnt; /* Number of raw fields to be matched */
 struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
 struct list_head l_entry;

 u64 id;
 struct ice_flow_prof *prof;
 enum ice_flow_priority priority;
 u16 vsi_handle;
};

#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
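
/* The two macros above convert between a struct ice_flow_entry pointer and
 * the opaque u64 handle that ice_flow_add_entry() returns through its
 * entry_h argument and that ice_flow_rem_entry() later consumes.
 */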

struct ice_flow_prof {
 struct list_head l_entry;

 u64 id;
 enum ice_flow_dir dir;
 u8 segs_cnt;

 /* Keep track of flow entries associated with this flow profile */
 struct mutex entries_lock;
 struct list_head entries;

 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

 /* software VSI handles referenced by this flow profile */
 DECLARE_BITMAP(vsis, ICE_MAX_VSI);

 bool symm; /* Symmetric Hash for RSS */
};

struct ice_rss_cfg {
 struct list_head l_entry;
 /* bitmap of VSIs added to the RSS entry */
 DECLARE_BITMAP(vsis, ICE_MAX_VSI);
 struct ice_rss_hash_cfg hash;
};

int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
 struct ice_flow_seg_info *segs, u8 segs_cnt,
 bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
 struct ice_parser_profile *prof, enum ice_block blk);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 u64 entry_id, u16 vsi, enum ice_flow_priority prio,
 void *data, u64 *entry_h);
int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
 u16 val_loc, u16 mask_loc);
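
/* Illustrative sketch, not part of the original header: a caller building a
 * single TCP/IPv4 segment and attaching it to a new Rx profile typically sets
 * the segment's header bitmask, records where each matched field sits in its
 * input buffer (the zero offset below is a hypothetical placeholder, and
 * unused mask/last locations can be passed as ICE_FLOW_FLD_OFF_INVAL), then
 * calls ice_flow_add_prof(); hw and blk are the caller's hardware struct and
 * block ID:
 *
 *   struct ice_flow_seg_info seg = {};
 *   struct ice_flow_prof *prof;
 *   int err;
 *
 *   ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *   ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
 *                    ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *   err = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, &seg,
 *                           ICE_FLOW_SEG_SINGLE, false, &prof);
 */
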
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
 u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
 const struct ice_rss_hash_cfg *cfg);
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
 const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */