1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019, Intel Corporation. */
3
4#ifndef _ICE_FLOW_H_
5#define _ICE_FLOW_H_
6
7#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
8#define ICE_FLOW_FLD_OFF_INVAL 0xffff
9
10/* Generate flow hash field from flow field type(s) */
11#define ICE_FLOW_HASH_IPV4 \
12 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
13 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
14#define ICE_FLOW_HASH_IPV6 \
15 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
16 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
17#define ICE_FLOW_HASH_TCP_PORT \
18 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
19 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
20#define ICE_FLOW_HASH_UDP_PORT \
21 (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
22 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
23#define ICE_FLOW_HASH_SCTP_PORT \
24 (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
25 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
26
27#define ICE_HASH_INVALID 0
28#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
29#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
30#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
31#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
32
33/* Protocol header fields within a packet segment. A segment consists of one or
34 * more protocol headers that make up a logical group of protocol headers. Each
35 * logical group of protocol headers encapsulates or is encapsulated using/by
36 * tunneling or encapsulation protocols for network virtualization such as GRE,
37 * VxLAN, etc.
38 */
/* Bit-flag values: each header type has its own bit so multiple headers can
 * be OR'ed together into a segment's "hdrs" bitmask (see
 * ICE_FLOW_SET_HDRS). Gaps in the value space are presumably reserved for
 * header types this version of the driver does not support.
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE	= 0x00000000,
	ICE_FLOW_SEG_HDR_IPV4	= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6	= 0x00000008,
	ICE_FLOW_SEG_HDR_TCP	= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP	= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP	= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE	= 0x00000200,
};
48
/* Header fields that can be matched or hashed on. Enum values double as bit
 * positions for BIT_ULL() when building 64-bit match/hash masks (e.g.
 * ICE_FLOW_HASH_IPV4 above), which is why the total count must stay at or
 * below 64.
 */
enum ice_flow_field {
	/* L3 */
	ICE_FLOW_FIELD_IDX_IPV4_SA,
	ICE_FLOW_FIELD_IDX_IPV4_DA,
	ICE_FLOW_FIELD_IDX_IPV6_SA,
	ICE_FLOW_FIELD_IDX_IPV6_DA,
	/* L4 */
	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
	/* GRE */
	ICE_FLOW_FIELD_IDX_GRE_KEYID,
	/* The total number of enums must not exceed 64 */
	ICE_FLOW_FIELD_IDX_MAX
};
67
68/* Flow headers and fields for AVF support */
69enum ice_flow_avf_hdr_field {
70 /* Values 0 - 28 are reserved for future use */
71 ICE_AVF_FLOW_FIELD_INVALID = 0,
72 ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
73 ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
74 ICE_AVF_FLOW_FIELD_IPV4_UDP,
75 ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
76 ICE_AVF_FLOW_FIELD_IPV4_TCP,
77 ICE_AVF_FLOW_FIELD_IPV4_SCTP,
78 ICE_AVF_FLOW_FIELD_IPV4_OTHER,
79 ICE_AVF_FLOW_FIELD_FRAG_IPV4,
80 /* Values 37-38 are reserved */
81 ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
82 ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
83 ICE_AVF_FLOW_FIELD_IPV6_UDP,
84 ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
85 ICE_AVF_FLOW_FIELD_IPV6_TCP,
86 ICE_AVF_FLOW_FIELD_IPV6_SCTP,
87 ICE_AVF_FLOW_FIELD_IPV6_OTHER,
88 ICE_AVF_FLOW_FIELD_FRAG_IPV6,
89 ICE_AVF_FLOW_FIELD_RSVD47,
90 ICE_AVF_FLOW_FIELD_FCOE_OX,
91 ICE_AVF_FLOW_FIELD_FCOE_RX,
92 ICE_AVF_FLOW_FIELD_FCOE_OTHER,
93 /* Values 51-62 are reserved */
94 ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
95 ICE_AVF_FLOW_FIELD_MAX
96};
97
/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op. The PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
102#define ICE_DEFAULT_RSS_HENA ( \
103 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
104 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
105 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
106 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
107 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
108 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
109 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
110 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
111 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
112 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
113 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
114 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
115 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
116 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
117 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
118 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
119
/* Direction of the flow; only Rx flows are defined by this driver */
enum ice_flow_dir {
	ICE_FLOW_RX		= 0x02,
};

/* Relative priority of a flow entry within its profile */
enum ice_flow_priority {
	ICE_FLOW_PRIO_LOW,
	ICE_FLOW_PRIO_NORMAL,
	ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_MAX		2	/* Max segments per flow profile */
#define ICE_FLOW_SEG_RAW_FLD_MAX	2	/* Max raw fields per segment */
#define ICE_FLOW_FV_EXTRACT_SZ		2	/* FV extraction width in bytes -- TODO confirm */

/* OR a set of ICE_FLOW_SEG_HDR_* flags into a segment's header bitmask */
#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))
135
/* Describes where an extracted header field lives in the field vector (FV) */
struct ice_flow_seg_xtrct {
	u8 prot_id;	/* Protocol ID of extracted header field */
	u16 off;	/* Starting offset of the field in header in bytes */
	u8 idx;		/* Index of FV entry used */
	u8 disp;	/* Displacement of field in bits from FV entry's start */
};
142
143enum ice_flow_fld_match_type {
144 ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
145 ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
146 ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
147 ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
148};
149
150struct ice_flow_fld_loc {
151 /* Describe offsets of field information relative to the beginning of
152 * input buffer provided when adding flow entries.
153 */
154 u16 val; /* Offset where the value is located */
155 u16 mask; /* Offset where the mask/prefix value is located */
156 u16 last; /* Length or offset where the upper value is located */
157};
158
159struct ice_flow_fld_info {
160 enum ice_flow_fld_match_type type;
161 /* Location where to retrieve data from an input buffer */
162 struct ice_flow_fld_loc src;
163 /* Location where to put the data into the final entry buffer */
164 struct ice_flow_fld_loc entry;
165 struct ice_flow_seg_xtrct xtrct;
166};
167
168struct ice_flow_seg_fld_raw {
169 struct ice_flow_fld_info info;
170 u16 off; /* Offset from the start of the segment */
171};
172
173struct ice_flow_seg_info {
174 u32 hdrs; /* Bitmask indicating protocol headers present */
175 u64 match; /* Bitmask indicating header fields to be matched */
176 u64 range; /* Bitmask indicating header fields matched as ranges */
177
178 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
179
180 u8 raws_cnt; /* Number of raw fields to be matched */
181 struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
182};
183
/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
	/* Node in the owning profile's "entries" list */
	struct list_head l_entry;

	/* Caller-supplied entry ID */
	u64 id;
	/* Back-pointer to the flow profile this entry belongs to */
	struct ice_flow_prof *prof;
	/* Flow entry's content */
	void *entry;
	enum ice_flow_priority priority;
	/* Software VSI handle this entry applies to */
	u16 vsi_handle;
	/* Size in bytes of the buffer pointed to by "entry" */
	u16 entry_sz;
};
196
/* Convert between a flow entry pointer and the opaque 64-bit handle handed
 * back by ice_flow_add_entry(). Going through uintptr_t avoids
 * pointer-to-integer size-mismatch warnings on 32-bit builds, and
 * parenthesizing the argument keeps the cast correct for arbitrary
 * expressions (e.g. pointer arithmetic).
 */
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)(e))
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
199
200struct ice_flow_prof {
201 struct list_head l_entry;
202
203 u64 id;
204 enum ice_flow_dir dir;
205 u8 segs_cnt;
206
207 /* Keep track of flow entries associated with this flow profile */
208 struct mutex entries_lock;
209 struct list_head entries;
210
211 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
212
213 /* software VSI handles referenced by this flow profile */
214 DECLARE_BITMAP(vsis, ICE_MAX_VSI);
215};
216
217struct ice_rss_cfg {
218 struct list_head l_entry;
219 /* bitmap of VSIs added to the RSS entry */
220 DECLARE_BITMAP(vsis, ICE_MAX_VSI);
221 u64 hashed_flds;
222 u32 packet_hdr;
223};
224
/* Create a flow profile from an array of packet segment descriptions and
 * associate it with prof_id within the given block; returns the new profile
 * through *prof.
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof);
/* Remove the flow profile previously added under prof_id */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
/* Add a flow entry to an existing profile; on success *entry_h receives an
 * opaque handle (see ICE_FLOW_ENTRY_HNDL) usable with ice_flow_rem_entry().
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
		   void *data, u64 *entry_h);
/* Remove a flow entry by its handle */
enum ice_status
ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
/* Record where field fld's value/mask/last bounds live in the caller's
 * input buffer (offsets, or ICE_FLOW_FLD_OFF_INVAL when unused); "range"
 * selects a range match instead of value/mask.
 */
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
/* Add a raw (protocol-agnostic, offset-based) match field to a segment */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc);
/* Drop all RSS list entries that reference the given VSI */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
/* Replay (re-program) the recorded RSS configurations for a VSI */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
/* Configure RSS for a VSI from an AVF hashed-fields bitmap
 * (ICE_AVF_FLOW_FIELD_* bits)
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
/* Remove all RSS configuration associated with a VSI */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
/* Add an RSS configuration: hashed fields plus additional packet headers */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
/* Look up the hashed-fields bitmap configured for vsi_handle/hdrs;
 * presumably returns ICE_HASH_INVALID (0) when none is found -- TODO confirm
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
252#endif /* _ICE_FLOW_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
/* NOTE(review): everything below appears to be a second, newer copy of
 * ice_flow.h concatenated after the first. Because _ICE_FLOW_H_ is already
 * defined above, the include guard at the next #ifndef compiles this entire
 * second copy out. The two copies should be reconciled into one file.
 */
3
4#ifndef _ICE_FLOW_H_
5#define _ICE_FLOW_H_
6
7#include "ice_flex_type.h"
8
9#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
10#define ICE_FLOW_FLD_OFF_INVAL 0xffff
11
12/* Generate flow hash field from flow field type(s) */
13#define ICE_FLOW_HASH_ETH \
14 (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
15 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
16#define ICE_FLOW_HASH_IPV4 \
17 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
18 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
19#define ICE_FLOW_HASH_IPV6 \
20 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
21 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
22#define ICE_FLOW_HASH_TCP_PORT \
23 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
24 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
25#define ICE_FLOW_HASH_UDP_PORT \
26 (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
27 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
28#define ICE_FLOW_HASH_SCTP_PORT \
29 (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
30 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
31
32#define ICE_HASH_INVALID 0
33#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
34#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
35#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
36#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
37
38#define ICE_FLOW_HASH_GTP_TEID \
39 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
40
41#define ICE_FLOW_HASH_GTP_IPV4_TEID \
42 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
43#define ICE_FLOW_HASH_GTP_IPV6_TEID \
44 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
45
46#define ICE_FLOW_HASH_GTP_U_TEID \
47 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
48
49#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
50 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
51#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
52 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
53
54#define ICE_FLOW_HASH_GTP_U_EH_TEID \
55 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
56
57#define ICE_FLOW_HASH_GTP_U_EH_QFI \
58 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
59
60#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
61 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
62 ICE_FLOW_HASH_GTP_U_EH_QFI)
63#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
64 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
65 ICE_FLOW_HASH_GTP_U_EH_QFI)
66
67#define ICE_FLOW_HASH_PPPOE_SESS_ID \
68 (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
69
70#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
71 (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
72#define ICE_FLOW_HASH_PPPOE_TCP_ID \
73 (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
74#define ICE_FLOW_HASH_PPPOE_UDP_ID \
75 (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
76
77#define ICE_FLOW_HASH_PFCP_SEID \
78 (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
79#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
80 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
81#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
82 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
83
84#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
85 (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
86#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
87 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
88#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
89 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
90
91#define ICE_FLOW_HASH_ESP_SPI \
92 (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
93#define ICE_FLOW_HASH_ESP_IPV4_SPI \
94 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
95#define ICE_FLOW_HASH_ESP_IPV6_SPI \
96 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
97
98#define ICE_FLOW_HASH_AH_SPI \
99 (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
100#define ICE_FLOW_HASH_AH_IPV4_SPI \
101 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
102#define ICE_FLOW_HASH_AH_IPV6_SPI \
103 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
104
105#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
106 (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
107#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
108 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
109#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
110 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
111
112/* Protocol header fields within a packet segment. A segment consists of one or
113 * more protocol headers that make up a logical group of protocol headers. Each
114 * logical group of protocol headers encapsulates or is encapsulated using/by
115 * tunneling or encapsulation protocols for network virtualization such as GRE,
116 * VxLAN, etc.
117 */
/* Bit-flag values: each protocol header gets its own bit so that multiple
 * headers can be OR'ed together into a segment's "hdrs" bitmask (see
 * ICE_FLOW_SET_HDRS and the ICE_FLOW_SEG_HDR_GTPU/PFCP composites below).
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE		= 0x00000000,
	ICE_FLOW_SEG_HDR_ETH		= 0x00000001,
	ICE_FLOW_SEG_HDR_VLAN		= 0x00000002,
	ICE_FLOW_SEG_HDR_IPV4		= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6		= 0x00000008,
	ICE_FLOW_SEG_HDR_ARP		= 0x00000010,
	ICE_FLOW_SEG_HDR_ICMP		= 0x00000020,
	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
	ICE_FLOW_SEG_HDR_GTPC		= 0x00000400,
	ICE_FLOW_SEG_HDR_GTPC_TEID	= 0x00000800,
	ICE_FLOW_SEG_HDR_GTPU_IP	= 0x00001000,
	ICE_FLOW_SEG_HDR_GTPU_EH	= 0x00002000,
	ICE_FLOW_SEG_HDR_GTPU_DWN	= 0x00004000,
	ICE_FLOW_SEG_HDR_GTPU_UP	= 0x00008000,
	ICE_FLOW_SEG_HDR_PPPOE		= 0x00010000,
	ICE_FLOW_SEG_HDR_PFCP_NODE	= 0x00020000,
	ICE_FLOW_SEG_HDR_PFCP_SESSION	= 0x00040000,
	ICE_FLOW_SEG_HDR_L2TPV3		= 0x00080000,
	ICE_FLOW_SEG_HDR_ESP		= 0x00100000,
	ICE_FLOW_SEG_HDR_AH		= 0x00200000,
	ICE_FLOW_SEG_HDR_NAT_T_ESP	= 0x00400000,
	ICE_FLOW_SEG_HDR_ETH_NON_IP	= 0x00800000,
	/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
	 * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
	 */
	ICE_FLOW_SEG_HDR_IPV_OTHER	= 0x20000000,
};
149
150/* These segments all have the same PTYPES, but are otherwise distinguished by
151 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
152 *
153 * gtp_eh_pdu gtp_eh_pdu_link
154 * ICE_FLOW_SEG_HDR_GTPU_IP 0 0
155 * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care
156 * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0
157 * ICE_FLOW_SEG_HDR_GTPU_UP 1 1
158 */
159#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
160 ICE_FLOW_SEG_HDR_GTPU_EH | \
161 ICE_FLOW_SEG_HDR_GTPU_DWN | \
162 ICE_FLOW_SEG_HDR_GTPU_UP)
163#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
164 ICE_FLOW_SEG_HDR_PFCP_SESSION)
165
166enum ice_flow_field {
167 /* L2 */
168 ICE_FLOW_FIELD_IDX_ETH_DA,
169 ICE_FLOW_FIELD_IDX_ETH_SA,
170 ICE_FLOW_FIELD_IDX_S_VLAN,
171 ICE_FLOW_FIELD_IDX_C_VLAN,
172 ICE_FLOW_FIELD_IDX_ETH_TYPE,
173 /* L3 */
174 ICE_FLOW_FIELD_IDX_IPV4_DSCP,
175 ICE_FLOW_FIELD_IDX_IPV6_DSCP,
176 ICE_FLOW_FIELD_IDX_IPV4_TTL,
177 ICE_FLOW_FIELD_IDX_IPV4_PROT,
178 ICE_FLOW_FIELD_IDX_IPV6_TTL,
179 ICE_FLOW_FIELD_IDX_IPV6_PROT,
180 ICE_FLOW_FIELD_IDX_IPV4_SA,
181 ICE_FLOW_FIELD_IDX_IPV4_DA,
182 ICE_FLOW_FIELD_IDX_IPV6_SA,
183 ICE_FLOW_FIELD_IDX_IPV6_DA,
184 /* L4 */
185 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
186 ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
187 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
188 ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
189 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
190 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
191 ICE_FLOW_FIELD_IDX_TCP_FLAGS,
192 /* ARP */
193 ICE_FLOW_FIELD_IDX_ARP_SIP,
194 ICE_FLOW_FIELD_IDX_ARP_DIP,
195 ICE_FLOW_FIELD_IDX_ARP_SHA,
196 ICE_FLOW_FIELD_IDX_ARP_DHA,
197 ICE_FLOW_FIELD_IDX_ARP_OP,
198 /* ICMP */
199 ICE_FLOW_FIELD_IDX_ICMP_TYPE,
200 ICE_FLOW_FIELD_IDX_ICMP_CODE,
201 /* GRE */
202 ICE_FLOW_FIELD_IDX_GRE_KEYID,
203 /* GTPC_TEID */
204 ICE_FLOW_FIELD_IDX_GTPC_TEID,
205 /* GTPU_IP */
206 ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
207 /* GTPU_EH */
208 ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
209 ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
210 /* GTPU_UP */
211 ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
212 /* GTPU_DWN */
213 ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
214 /* PPPoE */
215 ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
216 /* PFCP */
217 ICE_FLOW_FIELD_IDX_PFCP_SEID,
218 /* L2TPv3 */
219 ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
220 /* ESP */
221 ICE_FLOW_FIELD_IDX_ESP_SPI,
222 /* AH */
223 ICE_FLOW_FIELD_IDX_AH_SPI,
224 /* NAT_T ESP */
225 ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
226 /* The total number of enums must not exceed 64 */
227 ICE_FLOW_FIELD_IDX_MAX
228};
229
230/* Flow headers and fields for AVF support */
231enum ice_flow_avf_hdr_field {
232 /* Values 0 - 28 are reserved for future use */
233 ICE_AVF_FLOW_FIELD_INVALID = 0,
234 ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
235 ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
236 ICE_AVF_FLOW_FIELD_IPV4_UDP,
237 ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
238 ICE_AVF_FLOW_FIELD_IPV4_TCP,
239 ICE_AVF_FLOW_FIELD_IPV4_SCTP,
240 ICE_AVF_FLOW_FIELD_IPV4_OTHER,
241 ICE_AVF_FLOW_FIELD_FRAG_IPV4,
242 /* Values 37-38 are reserved */
243 ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
244 ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
245 ICE_AVF_FLOW_FIELD_IPV6_UDP,
246 ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
247 ICE_AVF_FLOW_FIELD_IPV6_TCP,
248 ICE_AVF_FLOW_FIELD_IPV6_SCTP,
249 ICE_AVF_FLOW_FIELD_IPV6_OTHER,
250 ICE_AVF_FLOW_FIELD_FRAG_IPV6,
251 ICE_AVF_FLOW_FIELD_RSVD47,
252 ICE_AVF_FLOW_FIELD_FCOE_OX,
253 ICE_AVF_FLOW_FIELD_FCOE_RX,
254 ICE_AVF_FLOW_FIELD_FCOE_OTHER,
255 /* Values 51-62 are reserved */
256 ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
257 ICE_AVF_FLOW_FIELD_MAX
258};
259
/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op. The PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
264#define ICE_DEFAULT_RSS_HENA ( \
265 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
266 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
267 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
268 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
269 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
270 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
271 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
272 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
273 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
274 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
275 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
276 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
277 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
278 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
279 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
280 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
281
282enum ice_flow_dir {
283 ICE_FLOW_RX = 0x02,
284};
285
286enum ice_flow_priority {
287 ICE_FLOW_PRIO_LOW,
288 ICE_FLOW_PRIO_NORMAL,
289 ICE_FLOW_PRIO_HIGH
290};
291
292#define ICE_FLOW_SEG_MAX 2
293#define ICE_FLOW_SEG_RAW_FLD_MAX 2
294#define ICE_FLOW_FV_EXTRACT_SZ 2
295
296#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
297
298struct ice_flow_seg_xtrct {
299 u8 prot_id; /* Protocol ID of extracted header field */
300 u16 off; /* Starting offset of the field in header in bytes */
301 u8 idx; /* Index of FV entry used */
302 u8 disp; /* Displacement of field in bits fr. FV entry's start */
303 u16 mask; /* Mask for field */
304};
305
306enum ice_flow_fld_match_type {
307 ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
308 ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
309 ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
310 ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
311};
312
313struct ice_flow_fld_loc {
314 /* Describe offsets of field information relative to the beginning of
315 * input buffer provided when adding flow entries.
316 */
317 u16 val; /* Offset where the value is located */
318 u16 mask; /* Offset where the mask/prefix value is located */
319 u16 last; /* Length or offset where the upper value is located */
320};
321
322struct ice_flow_fld_info {
323 enum ice_flow_fld_match_type type;
324 /* Location where to retrieve data from an input buffer */
325 struct ice_flow_fld_loc src;
326 /* Location where to put the data into the final entry buffer */
327 struct ice_flow_fld_loc entry;
328 struct ice_flow_seg_xtrct xtrct;
329};
330
331struct ice_flow_seg_fld_raw {
332 struct ice_flow_fld_info info;
333 u16 off; /* Offset from the start of the segment */
334};
335
336struct ice_flow_seg_info {
337 u32 hdrs; /* Bitmask indicating protocol headers present */
338 u64 match; /* Bitmask indicating header fields to be matched */
339 u64 range; /* Bitmask indicating header fields matched as ranges */
340
341 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
342
343 u8 raws_cnt; /* Number of raw fields to be matched */
344 struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
345};
346
347/* This structure describes a flow entry, and is tracked only in this file */
348struct ice_flow_entry {
349 struct list_head l_entry;
350
351 u64 id;
352 struct ice_flow_prof *prof;
353 /* Flow entry's content */
354 void *entry;
355 enum ice_flow_priority priority;
356 u16 vsi_handle;
357 u16 entry_sz;
358};
359
/* Convert between a flow entry pointer and the opaque 64-bit handle handed
 * back by ice_flow_add_entry(). The argument must be parenthesized: without
 * it, ICE_FLOW_ENTRY_HNDL(p + 1) expands to (u64)(uintptr_t)p + 1, which is
 * not the address of p + 1 (the cast binds before the addition and pointer
 * arithmetic scales by the element size).
 */
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)(e))
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
362
363struct ice_flow_prof {
364 struct list_head l_entry;
365
366 u64 id;
367 enum ice_flow_dir dir;
368 u8 segs_cnt;
369
370 /* Keep track of flow entries associated with this flow profile */
371 struct mutex entries_lock;
372 struct list_head entries;
373
374 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
375
376 /* software VSI handles referenced by this flow profile */
377 DECLARE_BITMAP(vsis, ICE_MAX_VSI);
378};
379
380struct ice_rss_cfg {
381 struct list_head l_entry;
382 /* bitmap of VSIs added to the RSS entry */
383 DECLARE_BITMAP(vsis, ICE_MAX_VSI);
384 u64 hashed_flds;
385 u32 packet_hdr;
386};
387
388int
389ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
390 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
391 struct ice_flow_prof **prof);
392int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
393int
394ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
395 u64 entry_id, u16 vsi, enum ice_flow_priority prio,
396 void *data, u64 *entry_h);
397int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
398void
399ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
400 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
401void
402ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
403 u16 val_loc, u16 mask_loc);
404int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
405void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
406int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
407int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
408int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
409int
410ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
411 u32 addl_hdrs);
412int
413ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
414 u32 addl_hdrs);
415u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
416#endif /* _ICE_FLOW_H_ */