   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2019, Intel Corporation. */
   3
   4#include "ice_common.h"
   5#include "ice_flow.h"
   6#include <net/gre.h>
   7
   8/* Describe properties of a protocol header field */
   9struct ice_flow_field_info {
  10	enum ice_flow_seg_hdr hdr;
  11	s16 off;	/* Offset from start of a protocol header, in bits */
   12	u16 size;	/* Size of field in bits */
  13	u16 mask;	/* 16-bit mask for field */
  14};
  15
  16#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
  17	.hdr = _hdr, \
  18	.off = (_offset_bytes) * BITS_PER_BYTE, \
  19	.size = (_size_bytes) * BITS_PER_BYTE, \
  20	.mask = 0, \
  21}
  22
  23#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
  24	.hdr = _hdr, \
  25	.off = (_offset_bytes) * BITS_PER_BYTE, \
  26	.size = (_size_bytes) * BITS_PER_BYTE, \
  27	.mask = _mask, \
  28}
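/* Illustrative note: the two helpers above convert byte-based offsets and
 * sizes into the bit-based values stored in struct ice_flow_field_info.
 * For example, the IPv4 source address entry in the table below,
 *
 *	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr))
 *
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32,
 * .mask = 0 }, i.e. a 32-bit field starting 96 bits into the IPv4 header.
 */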
  29
  30/* Table containing properties of supported protocol header fields */
  31static const
  32struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
  33	/* Ether */
  34	/* ICE_FLOW_FIELD_IDX_ETH_DA */
  35	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
  36	/* ICE_FLOW_FIELD_IDX_ETH_SA */
  37	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
  38	/* ICE_FLOW_FIELD_IDX_S_VLAN */
  39	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
  40	/* ICE_FLOW_FIELD_IDX_C_VLAN */
  41	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
  42	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
  43	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
  44	/* IPv4 / IPv6 */
  45	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
  46	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
  47	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
  48	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
  49	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
  50	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
  51	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
  52	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
  53	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
  54	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
  55	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
  56	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
  57	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
  58	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
  59	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
  60	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
  61	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
  62	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
  63	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
  64	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
  65	/* Transport */
  66	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
  67	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
  68	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
  69	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
  70	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
  71	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
  72	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
  73	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
  74	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
  75	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
  76	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
  77	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
  78	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
  79	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
  80	/* ARP */
  81	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
  82	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
  83	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
  84	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
  85	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
  86	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
  87	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
  88	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
  89	/* ICE_FLOW_FIELD_IDX_ARP_OP */
  90	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
  91	/* ICMP */
  92	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
  93	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
  94	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
  95	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
  96	/* GRE */
  97	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
  98	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
  99			  sizeof_field(struct gre_full_hdr, key)),
 100	/* GTP */
 101	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
 102	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
 103	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
 104	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
 105	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
 106	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
 107	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
 108	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
 109			      0x3f00),
 110	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
 111	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
 112	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
 113	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
 114	/* PPPoE */
 115	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
 116	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
 117	/* PFCP */
 118	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
 119	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
 120	/* L2TPv3 */
 121	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
 122	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
 123	/* ESP */
 124	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
 125	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
 126	/* AH */
 127	/* ICE_FLOW_FIELD_IDX_AH_SPI */
 128	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
 129	/* NAT_T_ESP */
 130	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
 131	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
 132};
 133
 134/* Bitmaps indicating relevant packet types for a particular protocol header
 135 *
 136 * Packet types for packets with an Outer/First/Single MAC header
 137 */
 138static const u32 ice_ptypes_mac_ofos[] = {
 139	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
 140	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
 141	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
 142	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 143	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 144	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 145	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 146	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 147};
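/* Illustrative note: each of these tables is 32 x 32 = 1024 bits, one bit
 * per packet type (PTYPE) index.  ice_flow_proc_seg_hdrs() below treats
 * them as ordinary bitmaps and narrows the working set with calls such as
 *
 *	bitmap_and(params->ptypes, params->ptypes,
 *		   (const unsigned long *)ice_ptypes_mac_ofos,
 *		   ICE_FLOW_PTYPE_MAX);
 *
 * so only the PTYPEs compatible with every requested header remain set.
 */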
 148
 149/* Packet types for packets with an Innermost/Last MAC VLAN header */
 150static const u32 ice_ptypes_macvlan_il[] = {
 151	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
 152	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
 153	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 154	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 155	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 156	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 157	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 158	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 159};
 160
 161/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
 162 * include IPv4 other PTYPEs
 163 */
 164static const u32 ice_ptypes_ipv4_ofos[] = {
 165	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
 166	0x00000000, 0x00000155, 0x00000000, 0x00000000,
 167	0x00000000, 0x000FC000, 0x00000000, 0x00000000,
 168	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 169	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 170	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 171	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 172	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 173};
 174
 175/* Packet types for packets with an Outer/First/Single IPv4 header, includes
 176 * IPv4 other PTYPEs
 177 */
 178static const u32 ice_ptypes_ipv4_ofos_all[] = {
 179	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
 180	0x00000000, 0x00000155, 0x00000000, 0x00000000,
 181	0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
 182	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 183	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 184	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 185	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 186	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 187};
 188
 189/* Packet types for packets with an Innermost/Last IPv4 header */
 190static const u32 ice_ptypes_ipv4_il[] = {
 191	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
 192	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
 193	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
 194	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 195	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 196	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 197	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 198	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 199};
 200
 201/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
 202 * include IPv6 other PTYPEs
 203 */
 204static const u32 ice_ptypes_ipv6_ofos[] = {
 205	0x00000000, 0x00000000, 0x77000000, 0x10002000,
 206	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
 207	0x00000000, 0x03F00000, 0x00000000, 0x00000000,
 208	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 209	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 210	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 211	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 212	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 213};
 214
 215/* Packet types for packets with an Outer/First/Single IPv6 header, includes
 216 * IPv6 other PTYPEs
 217 */
 218static const u32 ice_ptypes_ipv6_ofos_all[] = {
 219	0x00000000, 0x00000000, 0x77000000, 0x10002000,
 220	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
 221	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
 222	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 223	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 224	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 225	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 226	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 227};
 228
 229/* Packet types for packets with an Innermost/Last IPv6 header */
 230static const u32 ice_ptypes_ipv6_il[] = {
 231	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
 232	0x00000770, 0x00000000, 0x00000000, 0x00000000,
 233	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
 234	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 235	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 236	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 237	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 238	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 239};
 240
 241/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
 242static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
 243	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
 244	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 245	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 246	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 247	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 248	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 249	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 250	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 251};
 252
 253/* Packet types for packets with an Outermost/First ARP header */
 254static const u32 ice_ptypes_arp_of[] = {
 255	0x00000800, 0x00000000, 0x00000000, 0x00000000,
 256	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 257	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 258	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 259	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 260	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 261	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 262	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 263};
 264
 265/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
 266static const u32 ice_ptypes_ipv4_il_no_l4[] = {
 267	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
 268	0x00000008, 0x00000000, 0x00000000, 0x00000000,
 269	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 270	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 271	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 272	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 273	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 274	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 275};
 276
 277/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
 278static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
 279	0x00000000, 0x00000000, 0x43000000, 0x10002000,
 280	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 281	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 282	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 283	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 284	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 285	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 286	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 287};
 288
 289/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
 290static const u32 ice_ptypes_ipv6_il_no_l4[] = {
 291	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
 292	0x00000430, 0x00000000, 0x00000000, 0x00000000,
 293	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 294	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 295	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 296	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 297	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 298	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 299};
 300
 301/* UDP Packet types for non-tunneled packets or tunneled
 302 * packets with inner UDP.
 303 */
 304static const u32 ice_ptypes_udp_il[] = {
 305	0x81000000, 0x20204040, 0x04000010, 0x80810102,
 306	0x00000040, 0x00000000, 0x00000000, 0x00000000,
 307	0x00000000, 0x00410000, 0x90842000, 0x00000007,
 308	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 309	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 310	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 311	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 312	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 313};
 314
 315/* Packet types for packets with an Innermost/Last TCP header */
 316static const u32 ice_ptypes_tcp_il[] = {
 317	0x04000000, 0x80810102, 0x10000040, 0x02040408,
 318	0x00000102, 0x00000000, 0x00000000, 0x00000000,
 319	0x00000000, 0x00820000, 0x21084000, 0x00000000,
 320	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 321	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 322	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 323	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 324	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 325};
 326
 327/* Packet types for packets with an Innermost/Last SCTP header */
 328static const u32 ice_ptypes_sctp_il[] = {
 329	0x08000000, 0x01020204, 0x20000081, 0x04080810,
 330	0x00000204, 0x00000000, 0x00000000, 0x00000000,
 331	0x00000000, 0x01040000, 0x00000000, 0x00000000,
 332	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 333	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 334	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 335	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 336	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 337};
 338
 339/* Packet types for packets with an Outermost/First ICMP header */
 340static const u32 ice_ptypes_icmp_of[] = {
 341	0x10000000, 0x00000000, 0x00000000, 0x00000000,
 342	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 343	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 344	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 345	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 346	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 347	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 348	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 349};
 350
 351/* Packet types for packets with an Innermost/Last ICMP header */
 352static const u32 ice_ptypes_icmp_il[] = {
 353	0x00000000, 0x02040408, 0x40000102, 0x08101020,
 354	0x00000408, 0x00000000, 0x00000000, 0x00000000,
 355	0x00000000, 0x00000000, 0x42108000, 0x00000000,
 356	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 357	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 358	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 359	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 360	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 361};
 362
 363/* Packet types for packets with an Outermost/First GRE header */
 364static const u32 ice_ptypes_gre_of[] = {
 365	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
 366	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
 367	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 368	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 369	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 370	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 371	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 372	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 373};
 374
 375/* Packet types for packets with an Innermost/Last MAC header */
 376static const u32 ice_ptypes_mac_il[] = {
 377	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 378	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 379	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 380	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 381	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 382	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 383	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 384	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 385};
 386
 387/* Packet types for GTPC */
 388static const u32 ice_ptypes_gtpc[] = {
 389	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 390	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 391	0x00000000, 0x00000000, 0x00000180, 0x00000000,
 392	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 393	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 394	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 395	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 396	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 397};
 398
 399/* Packet types for GTPC with TEID */
 400static const u32 ice_ptypes_gtpc_tid[] = {
 401	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 402	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 403	0x00000000, 0x00000000, 0x00000060, 0x00000000,
 404	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 405	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 406	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 407	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 408	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 409};
 410
 411/* Packet types for GTPU */
 412static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
 413	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
 414	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
 415	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 416	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 417	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 418	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
 419	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
 420	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 421	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 422	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 423	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
 424	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
 425	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 426	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 427	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
 428	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
 429	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
 430	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 431	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
 432	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
 433};
 434
 435static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
 436	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 437	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 438	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 439	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 440	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 441	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 442	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 443	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 444	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 445	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 446	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 447	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 448	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 449	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 450	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
 451	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 452	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 453	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 454	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
 455	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
 456};
 457
 458static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
 459	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 460	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 461	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 462	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 463	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 464	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 465	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 466	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 467	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 468	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 469	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 470	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 471	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 472	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 473	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 474	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 475	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 476	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 477	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 478	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 479};
 480
 481static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
 482	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 483	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 484	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 485	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 486	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 487	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 488	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 489	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 490	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 491	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 492	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 493	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 494	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 495	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 496	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
 497	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 498	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 499	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 500	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
 501	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
 502};
 503
 504static const u32 ice_ptypes_gtpu[] = {
 505	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 506	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 507	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
 508	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 509	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 510	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 511	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 512	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 513};
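/* Illustrative note: this single GTP-U PTYPE bitmap is shared by the
 * ICE_FLOW_SEG_HDR_GTPU_IP/EH/UP/DWN cases in ice_flow_proc_seg_hdrs()
 * below; the EH/UP/DWN cases additionally set params->attr/attr_cnt so the
 * surviving PTYPEs are tagged with the matching ICE_PTYPE_ATTR_GTP_*
 * attribute, while the plain GTPU_IP case sets no attributes.
 */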
 514
 515/* Packet types for PPPoE */
 516static const u32 ice_ptypes_pppoe[] = {
 517	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 518	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 519	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
 520	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 521	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 522	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 523	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 524	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 525};
 526
 527/* Packet types for packets with PFCP NODE header */
 528static const u32 ice_ptypes_pfcp_node[] = {
 529	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 530	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 531	0x00000000, 0x00000000, 0x80000000, 0x00000002,
 532	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 533	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 534	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 535	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 536	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 537};
 538
 539/* Packet types for packets with PFCP SESSION header */
 540static const u32 ice_ptypes_pfcp_session[] = {
 541	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 542	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 543	0x00000000, 0x00000000, 0x00000000, 0x00000005,
 544	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 545	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 546	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 547	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 548	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 549};
 550
 551/* Packet types for L2TPv3 */
 552static const u32 ice_ptypes_l2tpv3[] = {
 553	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 554	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 555	0x00000000, 0x00000000, 0x00000000, 0x00000300,
 556	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 557	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 558	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 559	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 560	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 561};
 562
 563/* Packet types for ESP */
 564static const u32 ice_ptypes_esp[] = {
 565	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 566	0x00000000, 0x00000003, 0x00000000, 0x00000000,
 567	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 568	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 569	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 570	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 571	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 572	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 573};
 574
 575/* Packet types for AH */
 576static const u32 ice_ptypes_ah[] = {
 577	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 578	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
 579	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 580	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 581	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 582	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 583	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 584	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 585};
 586
 587/* Packet types for packets with NAT_T ESP header */
 588static const u32 ice_ptypes_nat_t_esp[] = {
 589	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 590	0x00000000, 0x00000030, 0x00000000, 0x00000000,
 591	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 592	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 593	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 594	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 595	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 596	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 597};
 598
 599static const u32 ice_ptypes_mac_non_ip_ofos[] = {
 600	0x00000846, 0x00000000, 0x00000000, 0x00000000,
 601	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 602	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
 603	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 604	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 605	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 606	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 607	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 608};
 609
 610/* Manage parameters and info. used during the creation of a flow profile */
 611struct ice_flow_prof_params {
 612	enum ice_block blk;
 613	u16 entry_length; /* # of bytes formatted entry will require */
 614	u8 es_cnt;
 615	struct ice_flow_prof *prof;
 616
 617	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
 618	 * This will give us the direction flags.
 619	 */
 620	struct ice_fv_word es[ICE_MAX_FV_WORDS];
 621	/* attributes can be used to add attributes to a particular PTYPE */
 622	const struct ice_ptype_attributes *attr;
 623	u16 attr_cnt;
 624
 625	u16 mask[ICE_MAX_FV_WORDS];
 626	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
 627};
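/* Illustrative note: one instance of this structure is filled per profile
 * by ice_flow_proc_segs(); ice_flow_add_prof_sync() then hands the
 * resulting ptypes bitmap, extraction sequence (es/mask) and PTYPE
 * attributes to ice_add_prof() to program the hardware profile.
 */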
 628
 629#define ICE_FLOW_RSS_HDRS_INNER_MASK \
 630	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
 631	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
 632	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
 633	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
 634	ICE_FLOW_SEG_HDR_NAT_T_ESP)
 635
 636#define ICE_FLOW_SEG_HDRS_L3_MASK	\
 637	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
 638#define ICE_FLOW_SEG_HDRS_L4_MASK	\
 639	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
 640	 ICE_FLOW_SEG_HDR_SCTP)
 641/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
 642#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
 643	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
 644
 645/**
 646 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 647 * @segs: array of one or more packet segments that describe the flow
 648 * @segs_cnt: number of packet segments provided
 649 */
  650static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
 651{
 652	u8 i;
 653
 654	for (i = 0; i < segs_cnt; i++) {
 655		/* Multiple L3 headers */
 656		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
 657		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
 658			return -EINVAL;
 659
 660		/* Multiple L4 headers */
 661		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
 662		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
 663			return -EINVAL;
 664	}
 665
 666	return 0;
 667}
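/* Illustrative example: the power-of-two checks above enforce "at most one
 * L3 header and at most one L4 header per segment".  A segment whose hdrs
 * field has both ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_IPV6 set leaves
 * two bits in the L3 mask, fails is_power_of_2() and is rejected with
 * -EINVAL.
 */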
 668
 669/* Sizes of fixed known protocol headers without header options */
 670#define ICE_FLOW_PROT_HDR_SZ_MAC	14
 671#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
 672#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
 673#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
 674#define ICE_FLOW_PROT_HDR_SZ_ARP	28
 675#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
 676#define ICE_FLOW_PROT_HDR_SZ_TCP	20
 677#define ICE_FLOW_PROT_HDR_SZ_UDP	8
 678#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
 679
 680/**
 681 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 682 * @params: information about the flow to be processed
 683 * @seg: index of packet segment whose header size is to be determined
 684 */
 685static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
 686{
 687	u16 sz;
 688
 689	/* L2 headers */
 690	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
 691		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
 692
 693	/* L3 headers */
 694	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
 695		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
 696	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
 697		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
 698	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
 699		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
 700	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
 701		/* An L3 header is required if L4 is specified */
 702		return 0;
 703
 704	/* L4 headers */
 705	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
 706		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
 707	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
 708		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
 709	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
 710		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
 711	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
 712		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
 713
 714	return sz;
 715}
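/* Worked example: a segment with hdrs = ICE_FLOW_SEG_HDR_VLAN |
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP yields
 * 16 + 20 + 8 = 44 bytes (MAC+VLAN, IPv4 and UDP fixed header sizes,
 * with no options counted).
 */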
 716
 717/**
 718 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 719 * @params: information about the flow to be processed
 720 *
 721 * This function identifies the packet types associated with the protocol
  722 * headers present in the packet segments of the specified flow profile.
 723 */
  724static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
 725{
 726	struct ice_flow_prof *prof;
 727	u8 i;
 728
 729	memset(params->ptypes, 0xff, sizeof(params->ptypes));
 730
 731	prof = params->prof;
 732
 733	for (i = 0; i < params->prof->segs_cnt; i++) {
 734		const unsigned long *src;
 735		u32 hdrs;
 736
 737		hdrs = prof->segs[i].hdrs;
 738
 739		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
 740			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
 741				(const unsigned long *)ice_ptypes_mac_il;
 742			bitmap_and(params->ptypes, params->ptypes, src,
 743				   ICE_FLOW_PTYPE_MAX);
 744		}
 745
 746		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
 747			src = (const unsigned long *)ice_ptypes_macvlan_il;
 748			bitmap_and(params->ptypes, params->ptypes, src,
 749				   ICE_FLOW_PTYPE_MAX);
 750		}
 751
 752		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
 753			bitmap_and(params->ptypes, params->ptypes,
 754				   (const unsigned long *)ice_ptypes_arp_of,
 755				   ICE_FLOW_PTYPE_MAX);
 756		}
 757
 758		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
 759		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
 760			src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
 761				(const unsigned long *)ice_ptypes_ipv4_ofos_all;
 762			bitmap_and(params->ptypes, params->ptypes, src,
 763				   ICE_FLOW_PTYPE_MAX);
 764		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
 765			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
 766			src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
 767				(const unsigned long *)ice_ptypes_ipv6_ofos_all;
 768			bitmap_and(params->ptypes, params->ptypes, src,
 769				   ICE_FLOW_PTYPE_MAX);
 770		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
 771			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
 772			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 :
 773				(const unsigned long *)ice_ptypes_ipv4_il_no_l4;
 774			bitmap_and(params->ptypes, params->ptypes, src,
 775				   ICE_FLOW_PTYPE_MAX);
 776		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 777			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
 778				(const unsigned long *)ice_ptypes_ipv4_il;
 779			bitmap_and(params->ptypes, params->ptypes, src,
 780				   ICE_FLOW_PTYPE_MAX);
 781		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
 782			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
 783			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 :
 784				(const unsigned long *)ice_ptypes_ipv6_il_no_l4;
 785			bitmap_and(params->ptypes, params->ptypes, src,
 786				   ICE_FLOW_PTYPE_MAX);
 787		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 788			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
 789				(const unsigned long *)ice_ptypes_ipv6_il;
 790			bitmap_and(params->ptypes, params->ptypes, src,
 791				   ICE_FLOW_PTYPE_MAX);
 792		}
 793
 794		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
 795			src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
 796			bitmap_and(params->ptypes, params->ptypes, src,
 797				   ICE_FLOW_PTYPE_MAX);
 798		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
 799			src = (const unsigned long *)ice_ptypes_pppoe;
 800			bitmap_and(params->ptypes, params->ptypes, src,
 801				   ICE_FLOW_PTYPE_MAX);
 802		} else {
 803			src = (const unsigned long *)ice_ptypes_pppoe;
 804			bitmap_andnot(params->ptypes, params->ptypes, src,
 805				      ICE_FLOW_PTYPE_MAX);
 806		}
 807
 808		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
 809			src = (const unsigned long *)ice_ptypes_udp_il;
 810			bitmap_and(params->ptypes, params->ptypes, src,
 811				   ICE_FLOW_PTYPE_MAX);
 812		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
 813			bitmap_and(params->ptypes, params->ptypes,
 814				   (const unsigned long *)ice_ptypes_tcp_il,
 815				   ICE_FLOW_PTYPE_MAX);
 816		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 817			src = (const unsigned long *)ice_ptypes_sctp_il;
 818			bitmap_and(params->ptypes, params->ptypes, src,
 819				   ICE_FLOW_PTYPE_MAX);
 820		}
 821
 822		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
 823			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
 824				(const unsigned long *)ice_ptypes_icmp_il;
 825			bitmap_and(params->ptypes, params->ptypes, src,
 826				   ICE_FLOW_PTYPE_MAX);
 827		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
 828			if (!i) {
 829				src = (const unsigned long *)ice_ptypes_gre_of;
 830				bitmap_and(params->ptypes, params->ptypes,
 831					   src, ICE_FLOW_PTYPE_MAX);
 832			}
 833		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
 834			src = (const unsigned long *)ice_ptypes_gtpc;
 835			bitmap_and(params->ptypes, params->ptypes, src,
 836				   ICE_FLOW_PTYPE_MAX);
 837		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
 838			src = (const unsigned long *)ice_ptypes_gtpc_tid;
 839			bitmap_and(params->ptypes, params->ptypes, src,
 840				   ICE_FLOW_PTYPE_MAX);
 841		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
 842			src = (const unsigned long *)ice_ptypes_gtpu;
 843			bitmap_and(params->ptypes, params->ptypes, src,
 844				   ICE_FLOW_PTYPE_MAX);
 845
 846			/* Attributes for GTP packet with downlink */
 847			params->attr = ice_attr_gtpu_down;
 848			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
 849		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
 850			src = (const unsigned long *)ice_ptypes_gtpu;
 851			bitmap_and(params->ptypes, params->ptypes, src,
 852				   ICE_FLOW_PTYPE_MAX);
 853
 854			/* Attributes for GTP packet with uplink */
 855			params->attr = ice_attr_gtpu_up;
 856			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
 857		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
 858			src = (const unsigned long *)ice_ptypes_gtpu;
 859			bitmap_and(params->ptypes, params->ptypes, src,
 860				   ICE_FLOW_PTYPE_MAX);
 861
 862			/* Attributes for GTP packet with Extension Header */
 863			params->attr = ice_attr_gtpu_eh;
 864			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
 865		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
 866			src = (const unsigned long *)ice_ptypes_gtpu;
 867			bitmap_and(params->ptypes, params->ptypes, src,
 868				   ICE_FLOW_PTYPE_MAX);
 869		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
 870			src = (const unsigned long *)ice_ptypes_l2tpv3;
 871			bitmap_and(params->ptypes, params->ptypes, src,
 872				   ICE_FLOW_PTYPE_MAX);
 873		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
 874			src = (const unsigned long *)ice_ptypes_esp;
 875			bitmap_and(params->ptypes, params->ptypes, src,
 876				   ICE_FLOW_PTYPE_MAX);
 877		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
 878			src = (const unsigned long *)ice_ptypes_ah;
 879			bitmap_and(params->ptypes, params->ptypes, src,
 880				   ICE_FLOW_PTYPE_MAX);
 881		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
 882			src = (const unsigned long *)ice_ptypes_nat_t_esp;
 883			bitmap_and(params->ptypes, params->ptypes, src,
 884				   ICE_FLOW_PTYPE_MAX);
 885		}
 886
 887		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
 888			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
 889				src = (const unsigned long *)ice_ptypes_pfcp_node;
 890			else
 891				src = (const unsigned long *)ice_ptypes_pfcp_session;
 892
 893			bitmap_and(params->ptypes, params->ptypes, src,
 894				   ICE_FLOW_PTYPE_MAX);
 895		} else {
 896			src = (const unsigned long *)ice_ptypes_pfcp_node;
 897			bitmap_andnot(params->ptypes, params->ptypes, src,
 898				      ICE_FLOW_PTYPE_MAX);
 899
 900			src = (const unsigned long *)ice_ptypes_pfcp_session;
 901			bitmap_andnot(params->ptypes, params->ptypes, src,
 902				      ICE_FLOW_PTYPE_MAX);
 903		}
 904	}
 905
 906	return 0;
 907}
 908
 909/**
 910 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 911 * @hw: pointer to the HW struct
 912 * @params: information about the flow to be processed
 913 * @seg: packet segment index of the field to be extracted
 914 * @fld: ID of field to be extracted
 915 * @match: bit field of all fields
 916 *
 917 * This function determines the protocol ID, offset, and size of the given
 918 * field. It then allocates one or more extraction sequence entries for the
  919 * given field, and fills the entries with protocol ID and offset information.
 920 */
 921static int
 922ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 923		    u8 seg, enum ice_flow_field fld, u64 match)
 924{
 925	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
 926	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
 927	u8 fv_words = hw->blk[params->blk].es.fvw;
 928	struct ice_flow_fld_info *flds;
 929	u16 cnt, ese_bits, i;
 930	u16 sib_mask = 0;
 931	u16 mask;
 932	u16 off;
 933
 934	flds = params->prof->segs[seg].fields;
 935
 936	switch (fld) {
 937	case ICE_FLOW_FIELD_IDX_ETH_DA:
 938	case ICE_FLOW_FIELD_IDX_ETH_SA:
 939	case ICE_FLOW_FIELD_IDX_S_VLAN:
 940	case ICE_FLOW_FIELD_IDX_C_VLAN:
 941		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
 942		break;
 943	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
 944		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
 945		break;
 946	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
 947		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
 948		break;
 949	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
 950		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
 951		break;
 952	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
 953	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
 954		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
 955
 956		/* TTL and PROT share the same extraction seq. entry.
 957		 * Each is considered a sibling to the other in terms of sharing
 958		 * the same extraction sequence entry.
 959		 */
 960		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
 961			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
 962		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
 963			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
 964
 965		/* If the sibling field is also included, that field's
 966		 * mask needs to be included.
 967		 */
 968		if (match & BIT(sib))
 969			sib_mask = ice_flds_info[sib].mask;
 970		break;
 971	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
 972	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
 973		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
 974
 975		/* TTL and PROT share the same extraction seq. entry.
 976		 * Each is considered a sibling to the other in terms of sharing
 977		 * the same extraction sequence entry.
 978		 */
 979		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
 980			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
 981		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
 982			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
 983
 984		/* If the sibling field is also included, that field's
 985		 * mask needs to be included.
 986		 */
 987		if (match & BIT(sib))
 988			sib_mask = ice_flds_info[sib].mask;
 989		break;
 990	case ICE_FLOW_FIELD_IDX_IPV4_SA:
 991	case ICE_FLOW_FIELD_IDX_IPV4_DA:
 992		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
 993		break;
 994	case ICE_FLOW_FIELD_IDX_IPV6_SA:
 995	case ICE_FLOW_FIELD_IDX_IPV6_DA:
 996		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
 997		break;
 998	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
 999	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1000	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1001		prot_id = ICE_PROT_TCP_IL;
1002		break;
1003	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1004	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1005		prot_id = ICE_PROT_UDP_IL_OR_S;
1006		break;
1007	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1008	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1009		prot_id = ICE_PROT_SCTP_IL;
1010		break;
1011	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1012	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1013	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1014	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1015	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1016	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1017		/* GTP is accessed through UDP OF protocol */
1018		prot_id = ICE_PROT_UDP_OF;
1019		break;
1020	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1021		prot_id = ICE_PROT_PPPOE;
1022		break;
1023	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1024		prot_id = ICE_PROT_UDP_IL_OR_S;
1025		break;
1026	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1027		prot_id = ICE_PROT_L2TPV3;
1028		break;
1029	case ICE_FLOW_FIELD_IDX_ESP_SPI:
1030		prot_id = ICE_PROT_ESP_F;
1031		break;
1032	case ICE_FLOW_FIELD_IDX_AH_SPI:
1033		prot_id = ICE_PROT_ESP_2;
1034		break;
1035	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1036		prot_id = ICE_PROT_UDP_IL_OR_S;
1037		break;
1038	case ICE_FLOW_FIELD_IDX_ARP_SIP:
1039	case ICE_FLOW_FIELD_IDX_ARP_DIP:
1040	case ICE_FLOW_FIELD_IDX_ARP_SHA:
1041	case ICE_FLOW_FIELD_IDX_ARP_DHA:
1042	case ICE_FLOW_FIELD_IDX_ARP_OP:
1043		prot_id = ICE_PROT_ARP_OF;
1044		break;
1045	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1046	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1047		/* ICMP type and code share the same extraction seq. entry */
1048		prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
1049				ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1050		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1051			ICE_FLOW_FIELD_IDX_ICMP_CODE :
1052			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1053		break;
1054	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1055		prot_id = ICE_PROT_GRE_OF;
1056		break;
1057	default:
1058		return -EOPNOTSUPP;
1059	}
1060
1061	/* Each extraction sequence entry is a word in size, and extracts a
 1062	 * word at a word-aligned offset within a protocol header.
1063	 */
1064	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1065
1066	flds[fld].xtrct.prot_id = prot_id;
1067	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1068		ICE_FLOW_FV_EXTRACT_SZ;
1069	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1070	flds[fld].xtrct.idx = params->es_cnt;
1071	flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1072
1073	/* Adjust the next field-entry index after accommodating the number of
1074	 * entries this field consumes
1075	 */
1076	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
1077			   ese_bits);
1078
1079	/* Fill in the extraction sequence entries needed for this field */
1080	off = flds[fld].xtrct.off;
1081	mask = flds[fld].xtrct.mask;
1082	for (i = 0; i < cnt; i++) {
1083		/* Only consume an extraction sequence entry if there is no
1084		 * sibling field associated with this field or the sibling entry
1085		 * already extracts the word shared with this field.
1086		 */
1087		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1088		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1089		    flds[sib].xtrct.off != off) {
1090			u8 idx;
1091
 1092			/* Make sure the number of extraction sequence entries required
1093			 * does not exceed the block's capability
1094			 */
1095			if (params->es_cnt >= fv_words)
1096				return -ENOSPC;
1097
1098			/* some blocks require a reversed field vector layout */
1099			if (hw->blk[params->blk].es.reverse)
1100				idx = fv_words - params->es_cnt - 1;
1101			else
1102				idx = params->es_cnt;
1103
1104			params->es[idx].prot_id = prot_id;
1105			params->es[idx].off = off;
1106			params->mask[idx] = mask | sib_mask;
1107			params->es_cnt++;
1108		}
1109
1110		off += ICE_FLOW_FV_EXTRACT_SZ;
1111	}
1112
1113	return 0;
1114}
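/* Worked example, assuming ICE_FLOW_FV_EXTRACT_SZ == 2 (its value in
 * ice_flow.h), so ese_bits == 16: for ICE_FLOW_FIELD_IDX_IPV4_TTL the
 * ice_flds_info table gives off = 64 bits, size = 8 bits, mask = 0xff00,
 * hence
 *
 *	xtrct.off  = (64 / 16) * 2 = 8	(bytes into the IPv4 header)
 *	xtrct.disp = 64 % 16 = 0
 *	cnt = DIV_ROUND_UP(0 + 8, 16) = 1
 *
 * i.e. one 2-byte extraction word covering bytes 8-9 of the header, the
 * same word used by the sibling ICE_FLOW_FIELD_IDX_IPV4_PROT field
 * (mask 0x00ff); the sibling check above lets the two share that entry
 * instead of consuming two.
 */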
1115
1116/**
1117 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1118 * @hw: pointer to the HW struct
1119 * @params: information about the flow to be processed
1120 * @seg: index of packet segment whose raw fields are to be extracted
1121 */
1122static int
1123ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1124		     u8 seg)
1125{
1126	u16 fv_words;
1127	u16 hdrs_sz;
1128	u8 i;
1129
1130	if (!params->prof->segs[seg].raws_cnt)
1131		return 0;
1132
1133	if (params->prof->segs[seg].raws_cnt >
1134	    ARRAY_SIZE(params->prof->segs[seg].raws))
1135		return -ENOSPC;
1136
1137	/* Offsets within the segment headers are not supported */
1138	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1139	if (!hdrs_sz)
1140		return -EINVAL;
1141
1142	fv_words = hw->blk[params->blk].es.fvw;
1143
1144	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1145		struct ice_flow_seg_fld_raw *raw;
1146		u16 off, cnt, j;
1147
1148		raw = &params->prof->segs[seg].raws[i];
1149
1150		/* Storing extraction information */
1151		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1152		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1153			ICE_FLOW_FV_EXTRACT_SZ;
1154		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1155			BITS_PER_BYTE;
1156		raw->info.xtrct.idx = params->es_cnt;
1157
1158		/* Determine the number of field vector entries this raw field
1159		 * consumes.
1160		 */
1161		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
1162				   (raw->info.src.last * BITS_PER_BYTE),
1163				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
1164		off = raw->info.xtrct.off;
1165		for (j = 0; j < cnt; j++) {
1166			u16 idx;
1167
 1168			/* Make sure the number of extraction sequence entries required
1169			 * does not exceed the block's capability
1170			 */
1171			if (params->es_cnt >= hw->blk[params->blk].es.count ||
1172			    params->es_cnt >= ICE_MAX_FV_WORDS)
1173				return -ENOSPC;
1174
1175			/* some blocks require a reversed field vector layout */
1176			if (hw->blk[params->blk].es.reverse)
1177				idx = fv_words - params->es_cnt - 1;
1178			else
1179				idx = params->es_cnt;
1180
1181			params->es[idx].prot_id = raw->info.xtrct.prot_id;
1182			params->es[idx].off = off;
1183			params->es_cnt++;
1184			off += ICE_FLOW_FV_EXTRACT_SZ;
1185		}
1186	}
1187
1188	return 0;
1189}
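/* Worked example with hypothetical values, again assuming 2-byte extraction
 * words (ICE_FLOW_FV_EXTRACT_SZ == 2): a raw field with raw->off = 3 and
 * raw->info.src.last = 2 (match two bytes starting three bytes into the
 * outer MAC header) gives xtrct.off = 2, xtrct.disp = 8 bits and
 * cnt = DIV_ROUND_UP(8 + 16, 16) = 2, i.e. two extraction words.
 */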
1190
1191/**
1192 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1193 * @hw: pointer to the HW struct
1194 * @params: information about the flow to be processed
1195 *
1196 * This function iterates through all matched fields in the given segments, and
1197 * creates an extraction sequence for the fields.
1198 */
1199static int
1200ice_flow_create_xtrct_seq(struct ice_hw *hw,
1201			  struct ice_flow_prof_params *params)
1202{
1203	struct ice_flow_prof *prof = params->prof;
1204	int status = 0;
1205	u8 i;
1206
1207	for (i = 0; i < prof->segs_cnt; i++) {
1208		u64 match = params->prof->segs[i].match;
1209		enum ice_flow_field j;
1210
1211		for_each_set_bit(j, (unsigned long *)&match,
1212				 ICE_FLOW_FIELD_IDX_MAX) {
 1213			status = ice_flow_xtract_fld(hw, params, i, j, match);
1214			if (status)
1215				return status;
1216			clear_bit(j, (unsigned long *)&match);
1217		}
1218
1219		/* Process raw matching bytes */
1220		status = ice_flow_xtract_raws(hw, params, i);
1221		if (status)
1222			return status;
1223	}
1224
1225	return status;
1226}
1227
1228/**
1229 * ice_flow_proc_segs - process all packet segments associated with a profile
1230 * @hw: pointer to the HW struct
1231 * @params: information about the flow to be processed
1232 */
1233static int
1234ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1235{
1236	int status;
1237
1238	status = ice_flow_proc_seg_hdrs(params);
1239	if (status)
1240		return status;
1241
1242	status = ice_flow_create_xtrct_seq(hw, params);
1243	if (status)
1244		return status;
1245
1246	switch (params->blk) {
1247	case ICE_BLK_FD:
1248	case ICE_BLK_RSS:
1249		status = 0;
1250		break;
1251	default:
1252		return -EOPNOTSUPP;
1253	}
1254
1255	return status;
1256}
1257
1258#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
1259#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
1260#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1261#define ICE_FLOW_FIND_PROF_CHK_SYMM	0x00000008
1262
1263/**
1264 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1265 * @hw: pointer to the HW struct
1266 * @blk: classification stage
1267 * @dir: flow direction
1268 * @segs: array of one or more packet segments that describe the flow
1269 * @segs_cnt: number of packet segments provided
1270 * @symm: symmetric setting for RSS profiles
1271 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1272 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1273 */
1274static struct ice_flow_prof *
1275ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1276			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1277			 u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
1278{
1279	struct ice_flow_prof *p, *prof = NULL;
1280
1281	mutex_lock(&hw->fl_profs_locks[blk]);
1282	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1283		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1284		    segs_cnt && segs_cnt == p->segs_cnt) {
1285			u8 i;
1286
1287			/* Check for profile-VSI association if specified */
1288			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1289			    ice_is_vsi_valid(hw, vsi_handle) &&
1290			    !test_bit(vsi_handle, p->vsis))
1291				continue;
1292
1293			/* Check for symmetric settings */
1294			if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
1295			    p->symm != symm)
1296				continue;
1297
1298			/* Protocol headers must be checked. Matched fields are
1299			 * checked if specified.
1300			 */
1301			for (i = 0; i < segs_cnt; i++)
1302				if (segs[i].hdrs != p->segs[i].hdrs ||
1303				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1304				     segs[i].match != p->segs[i].match))
1305					break;
1306
1307			/* A match is found if all segments are matched */
1308			if (i == segs_cnt) {
1309				prof = p;
1310				break;
1311			}
1312		}
1313	mutex_unlock(&hw->fl_profs_locks[blk]);
1314
1315	return prof;
1316}
1317
1318/**
1319 * ice_flow_find_prof_id - Look up a profile with given profile ID
1320 * @hw: pointer to the HW struct
1321 * @blk: classification stage
1322 * @prof_id: unique ID to identify this flow profile
1323 */
1324static struct ice_flow_prof *
1325ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1326{
1327	struct ice_flow_prof *p;
1328
1329	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1330		if (p->id == prof_id)
1331			return p;
1332
1333	return NULL;
1334}
1335
 1336/**
1337 * ice_flow_rem_entry_sync - Remove a flow entry
1338 * @hw: pointer to the HW struct
1339 * @blk: classification stage
1340 * @entry: flow entry to be removed
1341 */
1342static int
1343ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
1344			struct ice_flow_entry *entry)
1345{
1346	if (!entry)
1347		return -EINVAL;
1348
1349	list_del(&entry->l_entry);
1350
1351	devm_kfree(ice_hw_to_dev(hw), entry);
1352
1353	return 0;
1354}
1355
1356/**
1357 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1358 * @hw: pointer to the HW struct
1359 * @blk: classification stage
 1360 * @dir: flow direction
1361 * @segs: array of one or more packet segments that describe the flow
1362 * @segs_cnt: number of packet segments provided
1363 * @symm: symmetric setting for RSS profiles
1364 * @prof: stores the returned flow profile added
1365 *
1366 * Assumption: the caller has acquired the lock to the profile list
1367 */
1368static int
1369ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1370		       enum ice_flow_dir dir,
1371		       struct ice_flow_seg_info *segs, u8 segs_cnt,
1372		       bool symm, struct ice_flow_prof **prof)
1373{
1374	struct ice_flow_prof_params *params;
1375	struct ice_prof_id *ids;
1376	int status;
1377	u64 prof_id;
1378	u8 i;
1379
1380	if (!prof)
1381		return -EINVAL;
1382
1383	ids = &hw->blk[blk].prof_id;
1384	prof_id = find_first_zero_bit(ids->id, ids->count);
1385	if (prof_id >= ids->count)
1386		return -ENOSPC;
1387
1388	params = kzalloc(sizeof(*params), GFP_KERNEL);
1389	if (!params)
1390		return -ENOMEM;
1391
1392	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
1393				    GFP_KERNEL);
1394	if (!params->prof) {
1395		status = -ENOMEM;
1396		goto free_params;
1397	}
1398
1399	/* initialize extraction sequence to all invalid (0xff) */
1400	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1401		params->es[i].prot_id = ICE_PROT_INVALID;
1402		params->es[i].off = ICE_FV_OFFSET_INVAL;
1403	}
1404
1405	params->blk = blk;
1406	params->prof->id = prof_id;
1407	params->prof->dir = dir;
1408	params->prof->segs_cnt = segs_cnt;
1409	params->prof->symm = symm;
1410
1411	/* Make a copy of the segments that need to be persistent in the flow
1412	 * profile instance
1413	 */
1414	for (i = 0; i < segs_cnt; i++)
1415		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
1416
1417	status = ice_flow_proc_segs(hw, params);
1418	if (status) {
1419		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1420		goto out;
1421	}
1422
1423	/* Add a HW profile for this flow profile */
1424	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1425			      params->attr, params->attr_cnt, params->es,
1426			      params->mask, symm, true);
1427	if (status) {
1428		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1429		goto out;
1430	}
1431
1432	INIT_LIST_HEAD(&params->prof->entries);
1433	mutex_init(&params->prof->entries_lock);
1434	set_bit(prof_id, ids->id);
1435	*prof = params->prof;
1436
1437out:
1438	if (status)
1439		devm_kfree(ice_hw_to_dev(hw), params->prof);
1440free_params:
1441	kfree(params);
1442
1443	return status;
1444}
1445
1446/**
1447 * ice_flow_rem_prof_sync - remove a flow profile
1448 * @hw: pointer to the hardware structure
1449 * @blk: classification stage
1450 * @prof: pointer to flow profile to remove
1451 *
1452 * Assumption: the caller has acquired the lock to the profile list
1453 */
1454static int
1455ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1456		       struct ice_flow_prof *prof)
1457{
1458	int status;
1459
1460	/* Remove all remaining flow entries before removing the flow profile */
1461	if (!list_empty(&prof->entries)) {
1462		struct ice_flow_entry *e, *t;
1463
1464		mutex_lock(&prof->entries_lock);
1465
1466		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1467			status = ice_flow_rem_entry_sync(hw, blk, e);
1468			if (status)
1469				break;
1470		}
1471
1472		mutex_unlock(&prof->entries_lock);
1473	}
1474
1475	/* Remove all hardware profiles associated with this flow profile */
1476	status = ice_rem_prof(hw, blk, prof->id);
1477	if (!status) {
1478		clear_bit(prof->id, hw->blk[blk].prof_id.id);
1479		list_del(&prof->l_entry);
1480		mutex_destroy(&prof->entries_lock);
1481		devm_kfree(ice_hw_to_dev(hw), prof);
1482	}
1483
1484	return status;
1485}
1486
1487/**
1488 * ice_flow_assoc_prof - associate a VSI with a flow profile
1489 * @hw: pointer to the hardware structure
1490 * @blk: classification stage
1491 * @prof: pointer to flow profile
1492 * @vsi_handle: software VSI handle
1493 *
1494 * Assumption: the caller has acquired the lock to the profile list
1495 * and the software VSI handle has been validated
1496 */
1497static int
1498ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1499		    struct ice_flow_prof *prof, u16 vsi_handle)
1500{
1501	int status = 0;
1502
1503	if (!test_bit(vsi_handle, prof->vsis)) {
1504		status = ice_add_prof_id_flow(hw, blk,
1505					      ice_get_hw_vsi_num(hw,
1506								 vsi_handle),
1507					      prof->id);
1508		if (!status)
1509			set_bit(vsi_handle, prof->vsis);
1510		else
1511			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
1512				  status);
1513	}
1514
1515	return status;
1516}
1517
1518/**
1519 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1520 * @hw: pointer to the hardware structure
1521 * @blk: classification stage
1522 * @prof: pointer to flow profile
1523 * @vsi_handle: software VSI handle
1524 *
1525 * Assumption: the caller has acquired the lock to the profile list
1526 * and the software VSI handle has been validated
1527 */
1528static int
1529ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1530		       struct ice_flow_prof *prof, u16 vsi_handle)
1531{
1532	int status = 0;
1533
1534	if (test_bit(vsi_handle, prof->vsis)) {
1535		status = ice_rem_prof_id_flow(hw, blk,
1536					      ice_get_hw_vsi_num(hw,
1537								 vsi_handle),
1538					      prof->id);
1539		if (!status)
1540			clear_bit(vsi_handle, prof->vsis);
1541		else
1542			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
1543				  status);
1544	}
1545
1546	return status;
1547}
1548
1549#define FLAG_GTP_EH_PDU_LINK	BIT_ULL(13)
1550#define FLAG_GTP_EH_PDU		BIT_ULL(14)
1551
1552#define HI_BYTE_IN_WORD		GENMASK(15, 8)
1553#define LO_BYTE_IN_WORD		GENMASK(7, 0)
1554
1555#define FLAG_GTPU_MSK	\
1556	(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
1557#define FLAG_GTPU_UP	\
1558	(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
1559#define FLAG_GTPU_DW	FLAG_GTP_EH_PDU
1560
1561/**
1562 * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
1563 * @hw: pointer to the HW struct
1564 * @dest_vsi: dest VSI
1565 * @fdir_vsi: fdir programming VSI
1566 * @prof: stores parsed profile info from raw flow
1567 * @blk: classification blk
1568 *
1569 * Return: 0 on success or negative errno on failure.
1570 */
1571int
1572ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
1573			 struct ice_parser_profile *prof, enum ice_block blk)
1574{
1575	u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
1576	struct ice_flow_prof_params *params __free(kfree);
1577	u8 fv_words = hw->blk[blk].es.fvw;
1578	int status;
1579	int i, idx;
1580
1581	params = kzalloc(sizeof(*params), GFP_KERNEL);
1582	if (!params)
1583		return -ENOMEM;
1584
1585	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1586		params->es[i].prot_id = ICE_PROT_INVALID;
1587		params->es[i].off = ICE_FV_OFFSET_INVAL;
1588	}
1589
1590	for (i = 0; i < prof->fv_num; i++) {
1591		if (hw->blk[blk].es.reverse)
1592			idx = fv_words - i - 1;
1593		else
1594			idx = i;
1595		params->es[idx].prot_id = prof->fv[i].proto_id;
1596		params->es[idx].off = prof->fv[i].offset;
1597		params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
1598				      HI_BYTE_IN_WORD) |
1599				    (((prof->fv[i].msk) >> BITS_PER_BYTE) &
1600				      LO_BYTE_IN_WORD);
1601	}
1602
1603	switch (prof->flags) {
1604	case FLAG_GTPU_DW:
1605		params->attr = ice_attr_gtpu_down;
1606		params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1607		break;
1608	case FLAG_GTPU_UP:
1609		params->attr = ice_attr_gtpu_up;
1610		params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1611		break;
1612	default:
1613		if (prof->flags_msk & FLAG_GTPU_MSK) {
1614			params->attr = ice_attr_gtpu_session;
1615			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1616		}
1617		break;
1618	}
1619
1620	status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
1621			      params->attr, params->attr_cnt,
1622			      params->es, params->mask, false, false);
1623	if (status)
1624		return status;
1625
1626	status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
1627	if (status)
1628		ice_rem_prof(hw, blk, id);
1629
1630	return status;
1631}
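/* Worked example (illustrative only): the mask assembled in the loop above
 * swaps the two bytes of the parser-provided mask so it matches the byte
 * order of the field-vector word. Assuming a parser mask of 0x12ff:
 *
 *   ((0x12ff << BITS_PER_BYTE) & HI_BYTE_IN_WORD) = 0xff00
 *   ((0x12ff >> BITS_PER_BYTE) & LO_BYTE_IN_WORD) = 0x0012
 *   params->mask[idx]                             = 0xff12
 */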
1632
1633/**
1634 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1635 * @hw: pointer to the HW struct
1636 * @blk: classification stage
1637 * @dir: flow direction
1638 * @segs: array of one or more packet segments that describe the flow
1639 * @segs_cnt: number of packet segments provided
1640 * @symm: symmetric setting for RSS profiles
1641 * @prof: stores the returned flow profile added
1642 */
1643int
1644ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1645		  struct ice_flow_seg_info *segs, u8 segs_cnt,
1646		  bool symm, struct ice_flow_prof **prof)
1647{
1648	int status;
1649
1650	if (segs_cnt > ICE_FLOW_SEG_MAX)
1651		return -ENOSPC;
1652
1653	if (!segs_cnt)
1654		return -EINVAL;
1655
1656	if (!segs)
1657		return -EINVAL;
1658
1659	status = ice_flow_val_hdrs(segs, segs_cnt);
1660	if (status)
1661		return status;
1662
1663	mutex_lock(&hw->fl_profs_locks[blk]);
1664
1665	status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
1666					symm, prof);
1667	if (!status)
1668		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
1669
1670	mutex_unlock(&hw->fl_profs_locks[blk]);
1671
1672	return status;
1673}
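/* Usage sketch (illustrative, not part of the driver): build a single-segment
 * profile that matches IPv4 addresses and TCP ports and add it to the RSS
 * block. The helper name is hypothetical; the profile list lock is taken by
 * ice_flow_add_prof() itself.
 */
static int ice_example_add_ipv4_tcp_prof(struct ice_hw *hw,
					 struct ice_flow_prof **prof)
{
	struct ice_flow_seg_info *seg;
	int status;

	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);

	/* Hash on IPv4 source/destination addresses and TCP ports; for RSS no
	 * value/mask/last locations are needed, so all are left invalid.
	 */
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, false);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, false);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, false);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, false);

	/* The segment is copied into the profile, so it can be freed here */
	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, seg,
				   ICE_FLOW_SEG_SINGLE, false, prof);
	kfree(seg);
	return status;
}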
1674
1675/**
1676 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1677 * @hw: pointer to the HW struct
1678 * @blk: the block for which the flow profile is to be removed
1679 * @prof_id: unique ID of the flow profile to be removed
1680 */
1681int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1682{
1683	struct ice_flow_prof *prof;
1684	int status;
1685
1686	mutex_lock(&hw->fl_profs_locks[blk]);
1687
1688	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1689	if (!prof) {
1690		status = -ENOENT;
1691		goto out;
1692	}
1693
1694	/* prof becomes invalid after the call */
1695	status = ice_flow_rem_prof_sync(hw, blk, prof);
1696
1697out:
1698	mutex_unlock(&hw->fl_profs_locks[blk]);
1699
1700	return status;
1701}
1702
1703/**
1704 * ice_flow_add_entry - Add a flow entry
1705 * @hw: pointer to the HW struct
1706 * @blk: classification stage
1707 * @prof_id: ID of the profile to add a new flow entry to
1708 * @entry_id: unique ID to identify this flow entry
1709 * @vsi_handle: software VSI handle for the flow entry
1710 * @prio: priority of the flow entry
1711 * @data: pointer to a data buffer containing flow entry's match values/masks
1712 * @entry_h: pointer to buffer that receives the new flow entry's handle
1713 */
1714int
1715ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1716		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
1717		   void *data, u64 *entry_h)
1718{
1719	struct ice_flow_entry *e = NULL;
1720	struct ice_flow_prof *prof;
1721	int status;
1722
1723	/* No flow entry data is expected for RSS */
1724	if (!entry_h || (!data && blk != ICE_BLK_RSS))
1725		return -EINVAL;
1726
1727	if (!ice_is_vsi_valid(hw, vsi_handle))
1728		return -EINVAL;
1729
1730	mutex_lock(&hw->fl_profs_locks[blk]);
1731
1732	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1733	if (!prof) {
1734		status = -ENOENT;
1735	} else {
1736		/* Allocate memory for the entry being added and associate
1737		 * the VSI to the found flow profile
1738		 */
1739		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
1740		if (!e)
1741			status = -ENOMEM;
1742		else
1743			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1744	}
1745
1746	mutex_unlock(&hw->fl_profs_locks[blk]);
1747	if (status)
1748		goto out;
1749
1750	e->id = entry_id;
1751	e->vsi_handle = vsi_handle;
1752	e->prof = prof;
1753	e->priority = prio;
1754
1755	switch (blk) {
1756	case ICE_BLK_FD:
1757	case ICE_BLK_RSS:
1758		break;
1759	default:
1760		status = -EOPNOTSUPP;
1761		goto out;
1762	}
1763
1764	mutex_lock(&prof->entries_lock);
1765	list_add(&e->l_entry, &prof->entries);
1766	mutex_unlock(&prof->entries_lock);
1767
1768	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
1769
1770out:
1771	if (status)
1772		devm_kfree(ice_hw_to_dev(hw), e);
1773
1774	return status;
1775}
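/* Usage sketch (illustrative): attach a VSI to an RSS-block profile by adding
 * a flow entry. For ICE_BLK_RSS no match-data buffer is expected, so @data may
 * be NULL. The helper name, the entry ID, and the ICE_FLOW_PRIO_NORMAL
 * priority enumerator are assumptions made for this example; the returned
 * handle can later be passed to ice_flow_rem_entry().
 */
static int ice_example_add_rss_entry(struct ice_hw *hw, u64 prof_id,
				     u16 vsi_handle, u64 *entry_h)
{
	return ice_flow_add_entry(hw, ICE_BLK_RSS, prof_id,
				  1 /* caller-chosen entry ID */, vsi_handle,
				  ICE_FLOW_PRIO_NORMAL, NULL, entry_h);
}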
1776
1777/**
1778 * ice_flow_rem_entry - Remove a flow entry
1779 * @hw: pointer to the HW struct
1780 * @blk: classification stage
1781 * @entry_h: handle to the flow entry to be removed
1782 */
1783int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
1784{
1785	struct ice_flow_entry *entry;
1786	struct ice_flow_prof *prof;
1787	int status = 0;
1788
1789	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1790		return -EINVAL;
1791
1792	entry = ICE_FLOW_ENTRY_PTR(entry_h);
1793
1794	/* Retain the pointer to the flow profile as the entry will be freed */
1795	prof = entry->prof;
1796
1797	if (prof) {
1798		mutex_lock(&prof->entries_lock);
1799		status = ice_flow_rem_entry_sync(hw, blk, entry);
1800		mutex_unlock(&prof->entries_lock);
1801	}
1802
1803	return status;
1804}
1805
1806/**
1807 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1808 * @seg: packet segment the field being set belongs to
1809 * @fld: field to be set
1810 * @field_type: type of the field
1811 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1812 *           entry's input buffer
1813 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1814 *            input buffer
1815 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1816 *            entry's input buffer
1817 *
1818 * This helper function stores information about a field being matched, including
1819 * the type of the field and the locations of the value to match, the mask, and
1820 * the upper-bound value in the start of the input buffer for a flow entry.
1821 * This function should only be used for fixed-size data structures.
1822 *
1823 * This function also opportunistically determines the protocol headers to be
1824 * present based on the fields being set. Some fields cannot be used alone to
1825 * determine the protocol headers present. Sometimes, fields for particular
1826 * protocol headers are not matched. In those cases, the protocol headers
1827 * must be explicitly set.
1828 */
1829static void
1830ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1831		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1832		     u16 mask_loc, u16 last_loc)
1833{
1834	u64 bit = BIT_ULL(fld);
1835
1836	seg->match |= bit;
1837	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1838		seg->range |= bit;
1839
1840	seg->fields[fld].type = field_type;
1841	seg->fields[fld].src.val = val_loc;
1842	seg->fields[fld].src.mask = mask_loc;
1843	seg->fields[fld].src.last = last_loc;
1844
1845	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1846}
1847
1848/**
1849 * ice_flow_set_fld - specifies locations of field from entry's input buffer
1850 * @seg: packet segment the field being set belongs to
1851 * @fld: field to be set
1852 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1853 *           entry's input buffer
1854 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1855 *            input buffer
1856 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1857 *            entry's input buffer
1858 * @range: indicate if field being matched is to be in a range
1859 *
1860 * This function specifies the locations, in the form of byte offsets from the
1861 * start of the input buffer for a flow entry, from where the value to match,
1862 * the mask value, and upper value can be extracted. These locations are then
1863 * stored in the flow profile. When adding a flow entry associated with the
1864 * flow profile, these locations will be used to quickly extract the values and
1865 * create the content of a match entry. This function should only be used for
1866 * fixed-size data structures.
1867 */
1868void
1869ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1870		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1871{
1872	enum ice_flow_fld_match_type t = range ?
1873		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1874
1875	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1876}
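/* Usage sketch (illustrative): for blocks that consume match data (e.g. the
 * FD block), @val_loc and @mask_loc are byte offsets into a caller-defined
 * input buffer that is later passed to ice_flow_add_entry() as @data. The
 * buffer layout and helper below are hypothetical and exist only for this
 * example.
 */
struct ice_example_ipv4_match {
	__be32 src_ip;
	__be32 src_ip_mask;
};

static void ice_example_match_ipv4_sa(struct ice_flow_seg_info *seg)
{
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
			 offsetof(struct ice_example_ipv4_match, src_ip),
			 offsetof(struct ice_example_ipv4_match, src_ip_mask),
			 ICE_FLOW_FLD_OFF_INVAL, false);
}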
1877
1878/**
1879 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1880 * @seg: packet segment the field being set belongs to
1881 * @off: offset of the raw field from the beginning of the segment in bytes
1882 * @len: length of the raw pattern to be matched
1883 * @val_loc: location of the value to match from entry's input buffer
1884 * @mask_loc: location of mask value from entry's input buffer
1885 *
1886 * This function specifies the offset of the raw field to be matched from the
1887 * beginning of the specified packet segment, and the locations, in the form of
1888 * byte offsets from the start of the input buffer for a flow entry, from which
1889 * the value to match and the mask value are to be extracted. These locations are
1890 * then stored in the flow profile. When adding flow entries to the associated
1891 * flow profile, these locations can be used to quickly extract the values to
1892 * create the content of a match entry. This function should only be used for
1893 * fixed-size data structures.
1894 */
1895void
1896ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1897		     u16 val_loc, u16 mask_loc)
1898{
1899	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1900		seg->raws[seg->raws_cnt].off = off;
1901		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1902		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1903		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1904		/* The "last" field is used to store the length of the field */
1905		seg->raws[seg->raws_cnt].info.src.last = len;
1906	}
1907
1908	/* Overflows of "raws" will be handled as an error condition later in
1909	 * the flow when this information is processed.
1910	 */
1911	seg->raws_cnt++;
1912}
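/* Usage sketch (illustrative): match a two-byte raw pattern located six bytes
 * into the segment, with the value stored at byte 0 and the mask at byte 2 of
 * a hypothetical input buffer.
 */
static void ice_example_add_raw_match(struct ice_flow_seg_info *seg)
{
	ice_flow_add_fld_raw(seg, 6 /* offset within the segment */,
			     2 /* pattern length in bytes */,
			     0 /* value location in the input buffer */,
			     2 /* mask location in the input buffer */);
}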
1913
1914/**
1915 * ice_flow_rem_vsi_prof - remove VSI from flow profile
1916 * @hw: pointer to the hardware structure
1917 * @vsi_handle: software VSI handle
1918 * @prof_id: unique ID to identify this flow profile
1919 *
1920 * This function removes the flow entries associated with the input
1921 * VSI handle and disassociates the VSI from the flow profile.
1922 */
1923int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
1924{
1925	struct ice_flow_prof *prof;
1926	int status = 0;
1927
1928	if (!ice_is_vsi_valid(hw, vsi_handle))
1929		return -EINVAL;
1930
1931	/* find flow profile pointer with input package block and profile ID */
1932	prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
1933	if (!prof) {
1934		ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n",
1935			  prof_id);
1936		return -ENOENT;
1937	}
1938
1939	/* Remove all remaining flow entries before removing the flow profile */
1940	if (!list_empty(&prof->entries)) {
1941		struct ice_flow_entry *e, *t;
1942
1943		mutex_lock(&prof->entries_lock);
1944		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1945			if (e->vsi_handle != vsi_handle)
1946				continue;
1947
1948			status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e);
1949			if (status)
1950				break;
1951		}
1952		mutex_unlock(&prof->entries_lock);
1953	}
1954	if (status)
1955		return status;
1956
1957	/* disassociate the flow profile from sw VSI handle */
1958	status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
1959	if (status)
1960		ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n",
1961			  status);
1962	return status;
1963}
1964
1965#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
1966	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
1967
1968#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1969	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1970
1971#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1972	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1973
1974#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1975	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
1976	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1977	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1978
1979/**
1980 * ice_flow_set_rss_seg_info - setup packet segments for RSS
1981 * @segs: pointer to the flow field segment(s)
1982 * @seg_cnt: segment count
1983 * @cfg: configure parameters
1984 *
1985 * Helper function to extract fields from the hash bitmap and use the flow
1986 * header value to set up a flow field segment for further use in flow
1987 * profile addition or removal.
1988 */
1989static int
1990ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
1991			  const struct ice_rss_hash_cfg *cfg)
1992{
1993	struct ice_flow_seg_info *seg;
1994	u64 val;
1995	u16 i;
1996
1997	/* set innermost segment */
1998	seg = &segs[seg_cnt - 1];
1999
2000	for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
2001			 (u16)ICE_FLOW_FIELD_IDX_MAX)
2002		ice_flow_set_fld(seg, (enum ice_flow_field)i,
2003				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
2004				 ICE_FLOW_FLD_OFF_INVAL, false);
2005
2006	ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
2007
2008	/* set outermost header */
2009	if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
2010		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
2011						    ICE_FLOW_SEG_HDR_IPV_OTHER;
2012	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
2013		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
2014						    ICE_FLOW_SEG_HDR_IPV_OTHER;
2015
2016	if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2017	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
2018		return -EINVAL;
2019
2020	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2021	if (val && !is_power_of_2(val))
2022		return -EIO;
2023
2024	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2025	if (val && !is_power_of_2(val))
2026		return -EIO;
2027
2028	return 0;
2029}
2030
2031/**
2032 * ice_rem_vsi_rss_list - remove VSI from RSS list
2033 * @hw: pointer to the hardware structure
2034 * @vsi_handle: software VSI handle
2035 *
2036 * Remove the VSI from all RSS configurations in the list.
2037 */
2038void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2039{
2040	struct ice_rss_cfg *r, *tmp;
2041
2042	if (list_empty(&hw->rss_list_head))
2043		return;
2044
2045	mutex_lock(&hw->rss_locks);
2046	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
2047		if (test_and_clear_bit(vsi_handle, r->vsis))
2048			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
2049				list_del(&r->l_entry);
2050				devm_kfree(ice_hw_to_dev(hw), r);
2051			}
2052	mutex_unlock(&hw->rss_locks);
2053}
2054
2055/**
2056 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2057 * @hw: pointer to the hardware structure
2058 * @vsi_handle: software VSI handle
2059 *
2060 * This function will iterate through all flow profiles and disassociate
2061 * the VSI from each of them. If a flow profile then has no VSIs associated,
2062 * it will be removed.
2063 */
2064int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2065{
2066	const enum ice_block blk = ICE_BLK_RSS;
2067	struct ice_flow_prof *p, *t;
2068	int status = 0;
2069
2070	if (!ice_is_vsi_valid(hw, vsi_handle))
2071		return -EINVAL;
2072
2073	if (list_empty(&hw->fl_profs[blk]))
2074		return 0;
2075
2076	mutex_lock(&hw->rss_locks);
2077	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
2078		if (test_bit(vsi_handle, p->vsis)) {
2079			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
2080			if (status)
2081				break;
2082
2083			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
2084				status = ice_flow_rem_prof(hw, blk, p->id);
2085				if (status)
2086					break;
2087			}
2088		}
2089	mutex_unlock(&hw->rss_locks);
2090
2091	return status;
2092}
2093
2094/**
2095 * ice_get_rss_hdr_type - get a RSS profile's header type
2096 * @prof: RSS flow profile
2097 */
2098static enum ice_rss_cfg_hdr_type
2099ice_get_rss_hdr_type(struct ice_flow_prof *prof)
2100{
2101	if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
2102		return ICE_RSS_OUTER_HEADERS;
2103	} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
2104		const struct ice_flow_seg_info *s;
2105
2106		s = &prof->segs[ICE_RSS_OUTER_HEADERS];
2107		if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
2108			return ICE_RSS_INNER_HEADERS;
2109		if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
2110			return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
2111		if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
2112			return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
2113	}
2114
2115	return ICE_RSS_ANY_HEADERS;
2116}
2117
2118static bool
2119ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
2120		   enum ice_rss_cfg_hdr_type hdr_type)
2121{
2122	return (r->hash.hdr_type == hdr_type &&
2123		r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
2124		r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
2125}
2126
2127/**
2128 * ice_rem_rss_list - remove RSS configuration from list
2129 * @hw: pointer to the hardware structure
2130 * @vsi_handle: software VSI handle
2131 * @prof: pointer to flow profile
2132 *
2133 * Assumption: lock has already been acquired for RSS list
2134 */
2135static void
2136ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
2137{
2138	enum ice_rss_cfg_hdr_type hdr_type;
2139	struct ice_rss_cfg *r, *tmp;
2140
2141	/* Search for RSS hash fields associated with the VSI that match the
2142	 * hash configuration associated with the flow profile. If found,
2143	 * remove it from the VSI's RSS entry list and delete the entry.
2144	 */
2145	hdr_type = ice_get_rss_hdr_type(prof);
2146	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
2147		if (ice_rss_match_prof(r, prof, hdr_type)) {
2148			clear_bit(vsi_handle, r->vsis);
2149			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
2150				list_del(&r->l_entry);
2151				devm_kfree(ice_hw_to_dev(hw), r);
2152			}
2153			return;
2154		}
2155}
2156
2157/**
2158 * ice_add_rss_list - add RSS configuration to list
2159 * @hw: pointer to the hardware structure
2160 * @vsi_handle: software VSI handle
2161 * @prof: pointer to flow profile
2162 *
2163 * Assumption: lock has already been acquired for RSS list
2164 */
2165static int
2166ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
2167{
2168	enum ice_rss_cfg_hdr_type hdr_type;
2169	struct ice_rss_cfg *r, *rss_cfg;
2170
2171	hdr_type = ice_get_rss_hdr_type(prof);
2172	list_for_each_entry(r, &hw->rss_list_head, l_entry)
2173		if (ice_rss_match_prof(r, prof, hdr_type)) {
2174			set_bit(vsi_handle, r->vsis);
2175			return 0;
2176		}
2177
2178	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
2179			       GFP_KERNEL);
2180	if (!rss_cfg)
2181		return -ENOMEM;
2182
2183	rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
2184	rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
2185	rss_cfg->hash.hdr_type = hdr_type;
2186	rss_cfg->hash.symm = prof->symm;
2187	set_bit(vsi_handle, rss_cfg->vsis);
2188
2189	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
2190
2191	return 0;
2192}
2193
2194/**
2195 * ice_rss_config_xor_word - set the HSYMM registers for one input set word
2196 * @hw: pointer to the hardware structure
2197 * @prof_id: RSS hardware profile id
2198 * @src: the FV index used by the protocol's source field
2199 * @dst: the FV index used by the protocol's destination field
2200 *
2201 * Write to the HSYMM register with the index of @src FV the value of the @dst
2202 * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst]
2203 * while calculating the RSS input set.
2204 */
2205static void
2206ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
2207{
2208	u32 val, reg, bits_shift;
2209	u8 reg_idx;
2210
2211	reg_idx = src / GLQF_HSYMM_REG_SIZE;
2212	bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
2213	val = dst | GLQF_HSYMM_ENABLE_BIT;
2214
2215	reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
2216	reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
2217	wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
2218}
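/* Worked example (illustrative, assuming GLQF_HSYMM_REG_SIZE is 4 byte-wide
 * slots per 32-bit register): src = 5 selects reg_idx = 5 / 4 = 1 and
 * bits_shift = (5 % 4) << 3 = 8, so the destination index plus
 * GLQF_HSYMM_ENABLE_BIT is written into bits 15:8 of GLQF_HSYMM(prof_id, 1).
 */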
2219
2220/**
2221 * ice_rss_config_xor - set the symmetric registers for a profile's protocol
2222 * @hw: pointer to the hardware structure
2223 * @prof_id: RSS hardware profile id
2224 * @src: the FV index used by the protocol's source field
2225 * @dst: the FV index used by the protocol's destination field
2226 * @len: length of the source/destination fields in words
2227 */
2228static void
2229ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
2230{
2231	int fv_last_word =
2232		ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
2233	int i;
2234
2235	for (i = 0; i < len; i++) {
2236		ice_rss_config_xor_word(hw, prof_id,
2237					/* Yes, field vector in GLQF_HSYMM and
2238					 * GLQF_HINSET is reversed!
2239					 */
2240					fv_last_word - (src + i),
2241					fv_last_word - (dst + i));
2242		ice_rss_config_xor_word(hw, prof_id,
2243					fv_last_word - (dst + i),
2244					fv_last_word - (src + i));
2245	}
2246}
2247
2248/**
2249 * ice_rss_set_symm - set the symmetric settings for an RSS profile
2250 * @hw: pointer to the hardware structure
2251 * @prof: pointer to flow profile
2252 *
2253 * The symmetric hash is produced by XORing the protocol's source and
2254 * destination fields, as directed by the indexes in GLQF_HSYMM and
2255 * GLQF_HINSET. This function configures the profile's GLQF_HSYMM registers.
2256 */
2257static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
2258{
2259	struct ice_prof_map *map;
2260	u8 prof_id, m;
2261
2262	mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
2263	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
2264	if (map)
2265		prof_id = map->prof_id;
2266	mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
2267
2268	if (!map)
2269		return;
2270
2271	/* clear to default */
2272	for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
2273		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
2274
2275	if (prof->symm) {
2276		struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
2277		struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
2278		struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
2279		struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
2280		struct ice_flow_seg_xtrct *udp_src, *udp_dst;
2281		struct ice_flow_seg_info *seg;
2282
2283		seg = &prof->segs[prof->segs_cnt - 1];
2284
2285		ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
2286		ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
2287
2288		ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
2289		ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
2290
2291		tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
2292		tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
2293
2294		udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
2295		udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
2296
2297		sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
2298		sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
2299
2300		/* xor IPv4 */
2301		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
2302			ice_rss_config_xor(hw, prof_id,
2303					   ipv4_src->idx, ipv4_dst->idx, 2);
2304
2305		/* xor IPv6 */
2306		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
2307			ice_rss_config_xor(hw, prof_id,
2308					   ipv6_src->idx, ipv6_dst->idx, 8);
2309
2310		/* xor TCP */
2311		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
2312			ice_rss_config_xor(hw, prof_id,
2313					   tcp_src->idx, tcp_dst->idx, 1);
2314
2315		/* xor UDP */
2316		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
2317			ice_rss_config_xor(hw, prof_id,
2318					   udp_src->idx, udp_dst->idx, 1);
2319
2320		/* xor SCTP */
2321		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
2322			ice_rss_config_xor(hw, prof_id,
2323					   sctp_src->idx, sctp_dst->idx, 1);
2324	}
2325}
2326
2327/**
2328 * ice_add_rss_cfg_sync - add an RSS configuration
2329 * @hw: pointer to the hardware structure
2330 * @vsi_handle: software VSI handle
2331 * @cfg: configure parameters
2332 *
2333 * Assumption: lock has already been acquired for RSS list
2334 */
2335static int
2336ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
2337		     const struct ice_rss_hash_cfg *cfg)
2338{
2339	const enum ice_block blk = ICE_BLK_RSS;
2340	struct ice_flow_prof *prof = NULL;
2341	struct ice_flow_seg_info *segs;
2342	u8 segs_cnt;
2343	int status;
2344
2345	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
2346			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
2347
2348	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2349	if (!segs)
2350		return -ENOMEM;
2351
2352	/* Construct the packet segment info from the hashed fields */
2353	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
2354	if (status)
2355		goto exit;
2356
2357	/* Search for a flow profile that has matching headers, hash fields,
2358	 * symm and has the input VSI associated with it. If found, no further
2359	 * operations are required, so exit.
2360	 */
2361	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2362					cfg->symm, vsi_handle,
2363					ICE_FLOW_FIND_PROF_CHK_FLDS |
2364					ICE_FLOW_FIND_PROF_CHK_SYMM |
2365					ICE_FLOW_FIND_PROF_CHK_VSI);
2366	if (prof)
2367		goto exit;
2368
2369	/* Check if a flow profile exists with the same protocol headers and
2370	 * associated with the input VSI. If so disassociate the VSI from
2371	 * this profile. The VSI will be added to a new profile created with
2372	 * the protocol header and new hash field configuration.
2373	 */
2374	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2375					cfg->symm, vsi_handle,
2376					ICE_FLOW_FIND_PROF_CHK_VSI);
2377	if (prof) {
2378		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2379		if (!status)
2380			ice_rem_rss_list(hw, vsi_handle, prof);
2381		else
2382			goto exit;
2383
2384		/* Remove profile if it has no VSIs associated */
2385		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
2386			status = ice_flow_rem_prof(hw, blk, prof->id);
2387			if (status)
2388				goto exit;
2389		}
2390	}
2391
2392	/* Search for a profile that has the same match fields and symmetric
2393	 * setting. If this exists then associate the VSI to this profile.
2394	 */
2395	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2396					cfg->symm, vsi_handle,
2397					ICE_FLOW_FIND_PROF_CHK_SYMM |
2398					ICE_FLOW_FIND_PROF_CHK_FLDS);
2399	if (prof) {
2400		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2401		if (!status)
2402			status = ice_add_rss_list(hw, vsi_handle, prof);
2403		goto exit;
2404	}
2405
2406	/* Create a new flow profile with packet segment information. */
2407	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
2408				   segs, segs_cnt, cfg->symm, &prof);
2409	if (status)
2410		goto exit;
2411
2412	prof->symm = cfg->symm;
2413	ice_rss_set_symm(hw, prof);
2414	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2415	/* If association to a new flow profile failed then this profile can
2416	 * be removed.
2417	 */
2418	if (status) {
2419		ice_flow_rem_prof(hw, blk, prof->id);
2420		goto exit;
2421	}
2422
2423	status = ice_add_rss_list(hw, vsi_handle, prof);
2424
2425exit:
2426	kfree(segs);
2427	return status;
2428}
2429
2430/**
2431 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
2432 * @hw: pointer to the hardware structure
2433 * @vsi: VSI to add the RSS configuration to
2434 * @cfg: configure parameters
2435 *
2436 * This function will generate a flow profile based on the input fields to
2437 * hash on and the flow type, and will use the VSI number to add a flow
2438 * entry to the profile.
2439 */
2440int
2441ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
2442		const struct ice_rss_hash_cfg *cfg)
2443{
2444	struct ice_rss_hash_cfg local_cfg;
2445	u16 vsi_handle;
2446	int status;
2447
2448	if (!vsi)
2449		return -EINVAL;
2450
2451	vsi_handle = vsi->idx;
2452	if (!ice_is_vsi_valid(hw, vsi_handle) ||
2453	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
2454	    cfg->hash_flds == ICE_HASH_INVALID)
2455		return -EINVAL;
2456
2457	mutex_lock(&hw->rss_locks);
2458	local_cfg = *cfg;
2459	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
2460		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
2461	} else {
2462		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2463		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
2464		if (!status) {
2465			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
2466			status = ice_add_rss_cfg_sync(hw, vsi_handle,
2467						      &local_cfg);
2468		}
2469	}
2470	mutex_unlock(&hw->rss_locks);
2471
2472	return status;
2473}
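/* Usage sketch (illustrative): enable symmetric RSS hashing on IPv4 addresses
 * plus TCP ports for a VSI. With ICE_RSS_ANY_HEADERS, the code above programs
 * both the outer- and inner-header variants; the matching configuration can
 * later be removed with ice_rem_rss_cfg(). The helper name and literal values
 * are assumptions for this example only; the driver normally derives such a
 * configuration from ethtool or virtchnl requests.
 */
static int ice_example_enable_ipv4_tcp_rss(struct ice_hw *hw,
					   struct ice_vsi *vsi)
{
	struct ice_rss_hash_cfg cfg = {
		.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
		.hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
		.hdr_type = ICE_RSS_ANY_HEADERS,
		.symm = true,
	};

	return ice_add_rss_cfg(hw, vsi, &cfg);
}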
2474
2475/**
2476 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
2477 * @hw: pointer to the hardware structure
2478 * @vsi_handle: software VSI handle
2479 * @cfg: configure parameters
2480 *
2481 * Assumption: lock has already been acquired for RSS list
2482 */
2483static int
2484ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
2485		     const struct ice_rss_hash_cfg *cfg)
2486{
2487	const enum ice_block blk = ICE_BLK_RSS;
2488	struct ice_flow_seg_info *segs;
2489	struct ice_flow_prof *prof;
2490	u8 segs_cnt;
2491	int status;
2492
2493	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
2494			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
2495	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2496	if (!segs)
2497		return -ENOMEM;
2498
2499	/* Construct the packet segment info from the hashed fields */
2500	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
2501	if (status)
2502		goto out;
2503
2504	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2505					cfg->symm, vsi_handle,
2506					ICE_FLOW_FIND_PROF_CHK_FLDS);
2507	if (!prof) {
2508		status = -ENOENT;
2509		goto out;
2510	}
2511
2512	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2513	if (status)
2514		goto out;
2515
2516	/* Remove RSS configuration from VSI context before deleting
2517	 * the flow profile.
2518	 */
2519	ice_rem_rss_list(hw, vsi_handle, prof);
2520
2521	if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
2522		status = ice_flow_rem_prof(hw, blk, prof->id);
2523
2524out:
2525	kfree(segs);
2526	return status;
2527}
2528
2529/**
2530 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
2531 * @hw: pointer to the hardware structure
2532 * @vsi_handle: software VSI handle
2533 * @cfg: configure parameters
2534 *
2535 * This function will look up the flow profile based on the input
2536 * hash field bitmap, iterate through the profile's entry list, and
2537 * find the entry associated with the input VSI to be removed. Calls
2538 * are made to the underlying flow APIs, which in turn build or
2539 * update buffers for the RSS XLT1 section.
2540 */
2541int
2542ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
2543		const struct ice_rss_hash_cfg *cfg)
2544{
2545	struct ice_rss_hash_cfg local_cfg;
2546	int status;
2547
2548	if (!ice_is_vsi_valid(hw, vsi_handle) ||
2549	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
2550	    cfg->hash_flds == ICE_HASH_INVALID)
2551		return -EINVAL;
2552
2553	mutex_lock(&hw->rss_locks);
2554	local_cfg = *cfg;
2555	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
2556		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
2557	} else {
2558		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2559		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
2560		if (!status) {
2561			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
2562			status = ice_rem_rss_cfg_sync(hw, vsi_handle,
2563						      &local_cfg);
2564		}
2565	}
2566	mutex_unlock(&hw->rss_locks);
2567
2568	return status;
2569}
2570
2571/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
2572 * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
2573 * hash, convert them to the appropriate flow L3/L4 hash values.
2574 */
2575#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
2576	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
2577	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
2578#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
2579	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
2580	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
2581#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
2582	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
2583	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
2584	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
2585#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
2586	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
2587	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
2588
2589#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
2590	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
2591	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
2592#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
2593	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
2594	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
2595	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
2596#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
2597	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
2598	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
2599#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
2600	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
2601	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
2602
2603/**
2604 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
2605 * @hw: pointer to the hardware structure
2606 * @vsi: VF's VSI
2607 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
2608 *
2609 * This function will take the hash bitmap provided by the AVF driver via a
2610 * message, convert it to ICE-compatible values, and configure RSS flow
2611 * profiles.
2612 */
2613int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
2614{
2615	struct ice_rss_hash_cfg hcfg;
2616	u16 vsi_handle;
2617	int status = 0;
2618	u64 hash_flds;
2619
2620	if (!vsi)
2621		return -EINVAL;
2622
2623	vsi_handle = vsi->idx;
2624	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
2625	    !ice_is_vsi_valid(hw, vsi_handle))
2626		return -EINVAL;
2627
2628	/* Make sure no unsupported bits are specified */
2629	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
2630			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
2631		return -EIO;
2632
2633	hash_flds = avf_hash;
2634
2635	/* Always create an L3 RSS configuration for any L4 RSS configuration */
2636	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
2637		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
2638
2639	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
2640		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
2641
2642	/* Create the corresponding RSS configuration for each valid hash bit */
2643	while (hash_flds) {
2644		u64 rss_hash = ICE_HASH_INVALID;
2645
2646		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
2647			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
2648				rss_hash = ICE_FLOW_HASH_IPV4;
2649				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
2650			} else if (hash_flds &
2651				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
2652				rss_hash = ICE_FLOW_HASH_IPV4 |
2653					ICE_FLOW_HASH_TCP_PORT;
2654				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
2655			} else if (hash_flds &
2656				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
2657				rss_hash = ICE_FLOW_HASH_IPV4 |
2658					ICE_FLOW_HASH_UDP_PORT;
2659				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
2660			} else if (hash_flds &
2661				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
2662				rss_hash = ICE_FLOW_HASH_IPV4 |
2663					ICE_FLOW_HASH_SCTP_PORT;
2664				hash_flds &=
2665					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
2666			}
2667		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
2668			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
2669				rss_hash = ICE_FLOW_HASH_IPV6;
2670				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
2671			} else if (hash_flds &
2672				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
2673				rss_hash = ICE_FLOW_HASH_IPV6 |
2674					ICE_FLOW_HASH_TCP_PORT;
2675				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
2676			} else if (hash_flds &
2677				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
2678				rss_hash = ICE_FLOW_HASH_IPV6 |
2679					ICE_FLOW_HASH_UDP_PORT;
2680				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
2681			} else if (hash_flds &
2682				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
2683				rss_hash = ICE_FLOW_HASH_IPV6 |
2684					ICE_FLOW_HASH_SCTP_PORT;
2685				hash_flds &=
2686					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
2687			}
2688		}
2689
2690		if (rss_hash == ICE_HASH_INVALID)
2691			return -EIO;
2692
2693		hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2694		hcfg.hash_flds = rss_hash;
2695		hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
2696		hcfg.symm = false;
2697		status = ice_add_rss_cfg(hw, vsi, &hcfg);
2698		if (status)
2699			break;
2700	}
2701
2702	return status;
2703}
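/* Worked example (illustrative): a VF that requests only
 * BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) first has the plain IPv4 bits OR'ed in,
 * so the loop above ends up programming two RSS configurations:
 * ICE_FLOW_HASH_IPV4, then ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT.
 */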
2704
2705static bool rss_cfg_symm_valid(u64 hfld)
2706{
2707	return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
2708		  !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
2709		 (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
2710		  !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
2711		 (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
2712		  !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
2713		 (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
2714		  !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
2715		 (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
2716		  !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
2717}
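/* Illustrative note: symmetric hashing XORs each source field with its
 * destination counterpart, so rss_cfg_symm_valid() only accepts field sets in
 * which both halves of every used pair are present. For example,
 * ICE_FLOW_HASH_FLD_IPV4_SA | ICE_FLOW_HASH_FLD_IPV4_DA is acceptable, while
 * ICE_FLOW_HASH_FLD_IPV4_SA alone is not.
 */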
2718
2719/**
2720 * ice_set_rss_cfg_symm - set symmetry for all of a VSI's RSS configurations
2721 * @hw: pointer to the hardware structure
2722 * @vsi: VSI to set/unset Symmetric RSS
2723 * @symm: TRUE to set Symmetric RSS hashing
2724 */
2725int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
2726{
2727	struct ice_rss_hash_cfg	local;
2728	struct ice_rss_cfg *r, *tmp;
2729	u16 vsi_handle = vsi->idx;
2730	int status = 0;
2731
2732	if (!ice_is_vsi_valid(hw, vsi_handle))
2733		return -EINVAL;
2734
2735	mutex_lock(&hw->rss_locks);
2736	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
2737		if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
2738			local = r->hash;
2739			local.symm = symm;
2740			if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
2741				continue;
2742
2743			status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
2744			if (status)
2745				break;
2746		}
2747	}
2748	mutex_unlock(&hw->rss_locks);
2749
2750	return status;
2751}
2752
2753/**
2754 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
2755 * @hw: pointer to the hardware structure
2756 * @vsi_handle: software VSI handle
2757 */
2758int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2759{
2760	struct ice_rss_cfg *r;
2761	int status = 0;
2762
2763	if (!ice_is_vsi_valid(hw, vsi_handle))
2764		return -EINVAL;
2765
2766	mutex_lock(&hw->rss_locks);
2767	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
2768		if (test_bit(vsi_handle, r->vsis)) {
2769			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
2770			if (status)
2771				break;
2772		}
2773	}
2774	mutex_unlock(&hw->rss_locks);
2775
2776	return status;
2777}
2778
2779/**
2780 * ice_get_rss_cfg - returns hashed fields for the given header types
2781 * @hw: pointer to the hardware structure
2782 * @vsi_handle: software VSI handle
2783 * @hdrs: protocol header type
2784 * @symm: whether the RSS is symmetric (bool, output)
2785 *
2786 * This function will return the match fields of the first instance of a flow
2787 * profile that has the given header types and contains the input VSI.
2788 */
2789u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
2790{
2791	u64 rss_hash = ICE_HASH_INVALID;
2792	struct ice_rss_cfg *r;
2793
2794	/* verify that the protocol header is non-zero and the VSI is valid */
2795	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
2796		return ICE_HASH_INVALID;
2797
2798	mutex_lock(&hw->rss_locks);
2799	list_for_each_entry(r, &hw->rss_list_head, l_entry)
2800		if (test_bit(vsi_handle, r->vsis) &&
2801		    r->hash.addl_hdrs == hdrs) {
2802			rss_hash = r->hash.hash_flds;
2803			*symm = r->hash.symm;
2804			break;
2805		}
2806	mutex_unlock(&hw->rss_locks);
2807
2808	return rss_hash;
2809}
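/* Usage sketch (illustrative): query which fields are currently hashed for
 * IPv4/TCP flows on a VSI and whether that configuration is symmetric. The
 * helper name is hypothetical; a return of ICE_HASH_INVALID means no matching
 * configuration was found.
 */
static u64 ice_example_query_ipv4_tcp_hash(struct ice_hw *hw, u16 vsi_handle,
					   bool *symm)
{
	return ice_get_rss_cfg(hw, vsi_handle,
			       ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
			       symm);
}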
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2019, Intel Corporation. */
   3
   4#include "ice_common.h"
   5#include "ice_flow.h"
 
   6
   7/* Describe properties of a protocol header field */
   8struct ice_flow_field_info {
   9	enum ice_flow_seg_hdr hdr;
  10	s16 off;	/* Offset from start of a protocol header, in bits */
  11	u16 size;	/* Size of fields in bits */
 
  12};
  13
  14#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
  15	.hdr = _hdr, \
  16	.off = (_offset_bytes) * BITS_PER_BYTE, \
  17	.size = (_size_bytes) * BITS_PER_BYTE, \
 
 
 
 
 
 
 
 
  18}
  19
  20/* Table containing properties of supported protocol header fields */
  21static const
  22struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
 
 
 
 
 
 
 
 
 
 
 
  23	/* IPv4 / IPv6 */
 
 
 
 
 
 
 
 
 
 
 
 
  24	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
  25	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
  26	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
  27	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
  28	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
  29	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
  30	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
  31	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
  32	/* Transport */
  33	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
  34	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
  35	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
  36	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
  37	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
  38	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
  39	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
  40	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
  41	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
  42	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
  43	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
  44	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45	/* GRE */
  46	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
  47	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
  48			  sizeof_field(struct gre_full_hdr, key)),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  49};
  50
  51/* Bitmaps indicating relevant packet types for a particular protocol header
  52 *
  53 * Packet types for packets with an Outer/First/Single IPv4 header
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  54 */
  55static const u32 ice_ptypes_ipv4_ofos[] = {
  56	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
 
 
  57	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  58	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  59	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  60	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  61	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 
 
 
 
 
 
 
 
 
 
 
 
  62	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  63	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  64};
  65
  66/* Packet types for packets with an Innermost/Last IPv4 header */
  67static const u32 ice_ptypes_ipv4_il[] = {
  68	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
  69	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
  70	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  71	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  72	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  73	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  74	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  75	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  76};
  77
  78/* Packet types for packets with an Outer/First/Single IPv6 header */
 
 
  79static const u32 ice_ptypes_ipv6_ofos[] = {
  80	0x00000000, 0x00000000, 0x77000000, 0x10002000,
 
 
 
 
 
  81	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  82	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 
 
 
 
 
 
 
 
 
  83	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  84	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  85	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  86	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  87	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  88};
  89
  90/* Packet types for packets with an Innermost/Last IPv6 header */
  91static const u32 ice_ptypes_ipv6_il[] = {
  92	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
  93	0x00000770, 0x00000000, 0x00000000, 0x00000000,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  95	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  96	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  97	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  98	0x00000000, 0x00000000, 0x00000000, 0x00000000,
  99	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 100};
 101
 102/* UDP Packet types for non-tunneled packets or tunneled
 103 * packets with inner UDP.
 104 */
 105static const u32 ice_ptypes_udp_il[] = {
 106	0x81000000, 0x20204040, 0x04000010, 0x80810102,
 107	0x00000040, 0x00000000, 0x00000000, 0x00000000,
 108	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 109	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 110	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 111	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 112	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 113	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 114};
 115
 116/* Packet types for packets with an Innermost/Last TCP header */
 117static const u32 ice_ptypes_tcp_il[] = {
 118	0x04000000, 0x80810102, 0x10000040, 0x02040408,
 119	0x00000102, 0x00000000, 0x00000000, 0x00000000,
 120	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 121	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 122	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 123	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 124	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 125	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 126};
 127
 128/* Packet types for packets with an Innermost/Last SCTP header */
 129static const u32 ice_ptypes_sctp_il[] = {
 130	0x08000000, 0x01020204, 0x20000081, 0x04080810,
 131	0x00000204, 0x00000000, 0x00000000, 0x00000000,
 
 132	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 133	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 134	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 135	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 136	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 137	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 138};
 139
 140/* Packet types for packets with an Outermost/First GRE header */
 141static const u32 ice_ptypes_gre_of[] = {
 142	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
 143	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
 144	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 145	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 146	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 147	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 148	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 149	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 150};
 151
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 152/* Manage parameters and info. used during the creation of a flow profile */
 153struct ice_flow_prof_params {
 154	enum ice_block blk;
 155	u16 entry_length; /* # of bytes formatted entry will require */
 156	u8 es_cnt;
 157	struct ice_flow_prof *prof;
 158
 159	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
 160	 * This will give us the direction flags.
 161	 */
 162	struct ice_fv_word es[ICE_MAX_FV_WORDS];
 
 
 
 
 
 163	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
 164};
 165
 
 
 
 
 
 
 
 166#define ICE_FLOW_SEG_HDRS_L3_MASK	\
 167	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
 168#define ICE_FLOW_SEG_HDRS_L4_MASK	\
 
 
 
 
 169	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
 170
 171/**
 172 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 173 * @segs: array of one or more packet segments that describe the flow
 174 * @segs_cnt: number of packet segments provided
 175 */
 176static enum ice_status
 177ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
 178{
 179	u8 i;
 180
 181	for (i = 0; i < segs_cnt; i++) {
 182		/* Multiple L3 headers */
 183		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
 184		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
 185			return ICE_ERR_PARAM;
 186
 187		/* Multiple L4 headers */
 188		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
 189		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
 190			return ICE_ERR_PARAM;
 191	}
 192
 193	return 0;
 194}
 195
 196/* Sizes of fixed known protocol headers without header options */
 197#define ICE_FLOW_PROT_HDR_SZ_MAC	14
 
 198#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
 199#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
 
 
 200#define ICE_FLOW_PROT_HDR_SZ_TCP	20
 201#define ICE_FLOW_PROT_HDR_SZ_UDP	8
 202#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
 203
 204/**
 205 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 206 * @params: information about the flow to be processed
 207 * @seg: index of packet segment whose header size is to be determined
 208 */
 209static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
 210{
 211	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
 
 
 
 
 212
 213	/* L3 headers */
 214	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
 215		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
 216	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
 217		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
 218
 219	/* L4 headers */
 220	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
 221		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
 222	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
 223		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
 224	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
 225		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
 226
 227	return sz;
 228}
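/* Worked example (editorial): for a segment with hdrs set to
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, the function above returns
 * ICE_FLOW_PROT_HDR_SZ_MAC + ICE_FLOW_PROT_HDR_SZ_IPV4 +
 * ICE_FLOW_PROT_HDR_SZ_UDP = 14 + 20 + 8 = 42 bytes.
 */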
 229
 230/**
 231 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 232 * @params: information about the flow to be processed
 233 *
 234 * This function identifies the packet types associated with the protocol
 235 * headers present in the packet segments of the specified flow profile.
 236 */
 237static enum ice_status
 238ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
 239{
 240	struct ice_flow_prof *prof;
 241	u8 i;
 242
 243	memset(params->ptypes, 0xff, sizeof(params->ptypes));
 244
 245	prof = params->prof;
 246
 247	for (i = 0; i < params->prof->segs_cnt; i++) {
 248		const unsigned long *src;
 249		u32 hdrs;
 250
 251		hdrs = prof->segs[i].hdrs;
 252
 253		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 254			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
 255				(const unsigned long *)ice_ptypes_ipv4_il;
 256			bitmap_and(params->ptypes, params->ptypes, src,
 257				   ICE_FLOW_PTYPE_MAX);
 258		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 259			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
 260				(const unsigned long *)ice_ptypes_ipv6_il;
 261			bitmap_and(params->ptypes, params->ptypes, src,
 262				   ICE_FLOW_PTYPE_MAX);
 263		}
 264
 265		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
 266			src = (const unsigned long *)ice_ptypes_udp_il;
 267			bitmap_and(params->ptypes, params->ptypes, src,
 268				   ICE_FLOW_PTYPE_MAX);
 269		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
 270			bitmap_and(params->ptypes, params->ptypes,
 271				   (const unsigned long *)ice_ptypes_tcp_il,
 272				   ICE_FLOW_PTYPE_MAX);
 273		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 274			src = (const unsigned long *)ice_ptypes_sctp_il;
 275			bitmap_and(params->ptypes, params->ptypes, src,
 276				   ICE_FLOW_PTYPE_MAX);
 277		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
 278			if (!i) {
 279				src = (const unsigned long *)ice_ptypes_gre_of;
 280				bitmap_and(params->ptypes, params->ptypes,
 281					   src, ICE_FLOW_PTYPE_MAX);
 282			}
 283		}
 284	}
 285
 286	return 0;
 287}
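/* Illustrative walk-through (editorial): for a single outer segment with
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, params->ptypes starts
 * out all ones, is ANDed with ice_ptypes_ipv4_ofos (the outer IPv4 table,
 * because i == 0), and is then ANDed with ice_ptypes_udp_il, leaving only
 * the packet types that are both outer-IPv4 and UDP.
 */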
 288
 289/**
 290 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 291 * @hw: pointer to the HW struct
 292 * @params: information about the flow to be processed
 293 * @seg: packet segment index of the field to be extracted
 294 * @fld: ID of field to be extracted
 295 *
 296 * This function determines the protocol ID, offset, and size of the given
 297 * field. It then allocates one or more extraction sequence entries for the
 298 * given field, and fills the entries with protocol ID and offset information.
 299 */
 300static enum ice_status
 301ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 302		    u8 seg, enum ice_flow_field fld)
 303{
 304	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
 305	u8 fv_words = hw->blk[params->blk].es.fvw;
 306	struct ice_flow_fld_info *flds;
 307	u16 cnt, ese_bits, i;
 308	u16 off;
 309
 310	flds = params->prof->segs[seg].fields;
 311
 312	switch (fld) {
 313	case ICE_FLOW_FIELD_IDX_IPV4_SA:
 314	case ICE_FLOW_FIELD_IDX_IPV4_DA:
 315		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
 316		break;
 317	case ICE_FLOW_FIELD_IDX_IPV6_SA:
 318	case ICE_FLOW_FIELD_IDX_IPV6_DA:
 319		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
 320		break;
 321	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
 322	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
 323		prot_id = ICE_PROT_TCP_IL;
 324		break;
 325	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
 326	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
 327		prot_id = ICE_PROT_UDP_IL_OR_S;
 328		break;
 329	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
 330	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
 331		prot_id = ICE_PROT_SCTP_IL;
 332		break;
 333	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
 334		prot_id = ICE_PROT_GRE_OF;
 335		break;
 336	default:
 337		return ICE_ERR_NOT_IMPL;
 338	}
 339
 340	/* Each extraction sequence entry is a word in size, and extracts a
 341	 * word-aligned offset from a protocol header.
 342	 */
 343	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
 344
 345	flds[fld].xtrct.prot_id = prot_id;
 346	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
 347		ICE_FLOW_FV_EXTRACT_SZ;
 348	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
 349	flds[fld].xtrct.idx = params->es_cnt;
 350
 351	/* Adjust the next field-entry index after accommodating the number of
 352	 * entries this field consumes
 353	 */
 354	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
 355			   ese_bits);
 356
 357	/* Fill in the extraction sequence entries needed for this field */
 358	off = flds[fld].xtrct.off;
 359	for (i = 0; i < cnt; i++) {
 360		u8 idx;
 361
 362		/* Make sure the number of extraction sequence entries required
 363		 * does not exceed the block's capability
 364		 */
 365		if (params->es_cnt >= fv_words)
 366			return ICE_ERR_MAX_LIMIT;
 367
 368		/* some blocks require a reversed field vector layout */
 369		if (hw->blk[params->blk].es.reverse)
 370			idx = fv_words - params->es_cnt - 1;
 371		else
 372			idx = params->es_cnt;
 373
 374		params->es[idx].prot_id = prot_id;
 375		params->es[idx].off = off;
 376		params->es_cnt++;
 377
 378		off += ICE_FLOW_FV_EXTRACT_SZ;
 379	}
 380
 381	return 0;
 382}
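/* Worked example (editorial, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes, so
 * ese_bits is 16): for ICE_FLOW_FIELD_IDX_TCP_FLAGS, ice_flds_info gives a
 * bit offset of 13 * 8 = 104 and a size of 8 bits. The code above computes
 * xtrct.off = (104 / 16) * 2 = 12 bytes, xtrct.disp = 104 % 16 = 8 bits and
 * cnt = DIV_ROUND_UP(8 + 8, 16) = 1, i.e. a single word-sized extraction at
 * byte offset 12 of the TCP header covers the flags byte.
 */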
 383
 384/**
 385 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 386 * @hw: pointer to the HW struct
 387 * @params: information about the flow to be processed
 388 * @seg: index of packet segment whose raw fields are to be extracted
 389 */
 390static enum ice_status
 391ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
 392		     u8 seg)
 393{
 394	u16 fv_words;
 395	u16 hdrs_sz;
 396	u8 i;
 397
 398	if (!params->prof->segs[seg].raws_cnt)
 399		return 0;
 400
 401	if (params->prof->segs[seg].raws_cnt >
 402	    ARRAY_SIZE(params->prof->segs[seg].raws))
 403		return ICE_ERR_MAX_LIMIT;
 404
 405	/* Offsets within the segment headers are not supported */
 406	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
 407	if (!hdrs_sz)
 408		return ICE_ERR_PARAM;
 409
 410	fv_words = hw->blk[params->blk].es.fvw;
 411
 412	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
 413		struct ice_flow_seg_fld_raw *raw;
 414		u16 off, cnt, j;
 415
 416		raw = &params->prof->segs[seg].raws[i];
 417
 418		/* Storing extraction information */
 419		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
 420		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
 421			ICE_FLOW_FV_EXTRACT_SZ;
 422		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
 423			BITS_PER_BYTE;
 424		raw->info.xtrct.idx = params->es_cnt;
 425
 426		/* Determine the number of field vector entries this raw field
 427		 * consumes.
 428		 */
 429		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
 430				   (raw->info.src.last * BITS_PER_BYTE),
 431				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
 432		off = raw->info.xtrct.off;
 433		for (j = 0; j < cnt; j++) {
 434			u16 idx;
 435
 436			/* Make sure the number of extraction sequence entries required
 437			 * does not exceed the block's capability
 438			 */
 439			if (params->es_cnt >= hw->blk[params->blk].es.count ||
 440			    params->es_cnt >= ICE_MAX_FV_WORDS)
 441				return ICE_ERR_MAX_LIMIT;
 442
 443			/* some blocks require a reversed field vector layout */
 444			if (hw->blk[params->blk].es.reverse)
 445				idx = fv_words - params->es_cnt - 1;
 446			else
 447				idx = params->es_cnt;
 448
 449			params->es[idx].prot_id = raw->info.xtrct.prot_id;
 450			params->es[idx].off = off;
 451			params->es_cnt++;
 452			off += ICE_FLOW_FV_EXTRACT_SZ;
 453		}
 454	}
 455
 456	return 0;
 457}
 458
 459/**
 460 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 461 * @hw: pointer to the HW struct
 462 * @params: information about the flow to be processed
 463 *
 464 * This function iterates through all matched fields in the given segments, and
 465 * creates an extraction sequence for the fields.
 466 */
 467static enum ice_status
 468ice_flow_create_xtrct_seq(struct ice_hw *hw,
 469			  struct ice_flow_prof_params *params)
 470{
 471	struct ice_flow_prof *prof = params->prof;
 472	enum ice_status status = 0;
 473	u8 i;
 474
 475	for (i = 0; i < prof->segs_cnt; i++) {
 476		u8 j;
 477
 478		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
 479				 ICE_FLOW_FIELD_IDX_MAX) {
 480			status = ice_flow_xtract_fld(hw, params, i,
 481						     (enum ice_flow_field)j);
 482			if (status)
 483				return status;
 484		}
 485
 486		/* Process raw matching bytes */
 487		status = ice_flow_xtract_raws(hw, params, i);
 488		if (status)
 489			return status;
 490	}
 491
 492	return status;
 493}
 494
 495/**
 496 * ice_flow_proc_segs - process all packet segments associated with a profile
 497 * @hw: pointer to the HW struct
 498 * @params: information about the flow to be processed
 499 */
 500static enum ice_status
 501ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
 502{
 503	enum ice_status status;
 504
 505	status = ice_flow_proc_seg_hdrs(params);
 506	if (status)
 507		return status;
 508
 509	status = ice_flow_create_xtrct_seq(hw, params);
 510	if (status)
 511		return status;
 512
 513	switch (params->blk) {
 514	case ICE_BLK_FD:
 515	case ICE_BLK_RSS:
 516		status = 0;
 517		break;
 518	default:
 519		return ICE_ERR_NOT_IMPL;
 520	}
 521
 522	return status;
 523}
 524
 525#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
 526#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
 527#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
 528
 529/**
 530 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 531 * @hw: pointer to the HW struct
 532 * @blk: classification stage
 533 * @dir: flow direction
 534 * @segs: array of one or more packet segments that describe the flow
 535 * @segs_cnt: number of packet segments provided
 536 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 537 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 538 */
 539static struct ice_flow_prof *
 540ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
 541			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
 542			 u8 segs_cnt, u16 vsi_handle, u32 conds)
 543{
 544	struct ice_flow_prof *p, *prof = NULL;
 545
 546	mutex_lock(&hw->fl_profs_locks[blk]);
 547	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
 548		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
 549		    segs_cnt && segs_cnt == p->segs_cnt) {
 550			u8 i;
 551
 552			/* Check for profile-VSI association if specified */
 553			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
 554			    ice_is_vsi_valid(hw, vsi_handle) &&
 555			    !test_bit(vsi_handle, p->vsis))
 556				continue;
 557
 558			/* Protocol headers must be checked. Matched fields are
 559			 * checked if specified.
 560			 */
 561			for (i = 0; i < segs_cnt; i++)
 562				if (segs[i].hdrs != p->segs[i].hdrs ||
 563				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
 564				     segs[i].match != p->segs[i].match))
 565					break;
 566
 567			/* A match is found if all segments are matched */
 568			if (i == segs_cnt) {
 569				prof = p;
 570				break;
 571			}
 572		}
 573	mutex_unlock(&hw->fl_profs_locks[blk]);
 574
 575	return prof;
 576}
 577
 578/**
 579 * ice_flow_find_prof_id - Look up a profile with given profile ID
 580 * @hw: pointer to the HW struct
 581 * @blk: classification stage
 582 * @prof_id: unique ID to identify this flow profile
 583 */
 584static struct ice_flow_prof *
 585ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
 586{
 587	struct ice_flow_prof *p;
 588
 589	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
 590		if (p->id == prof_id)
 591			return p;
 592
 593	return NULL;
 594}
 595
 596/**
 597 * ice_dealloc_flow_entry - Deallocate flow entry memory
 598 * @hw: pointer to the HW struct
 599 * @entry: flow entry to be removed
 600 */
 601static void
 602ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
 603{
 604	if (!entry)
 605		return;
 606
 607	if (entry->entry)
 608		devm_kfree(ice_hw_to_dev(hw), entry->entry);
 609
 610	devm_kfree(ice_hw_to_dev(hw), entry);
 611}
 612
 613/**
 614 * ice_flow_rem_entry_sync - Remove a flow entry
 615 * @hw: pointer to the HW struct
 616 * @blk: classification stage
 617 * @entry: flow entry to be removed
 618 */
 619static enum ice_status
 620ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
 621			struct ice_flow_entry *entry)
 622{
 623	if (!entry)
 624		return ICE_ERR_BAD_PTR;
 625
 626	list_del(&entry->l_entry);
 627
 628	ice_dealloc_flow_entry(hw, entry);
 629
 630	return 0;
 631}
 632
 633/**
 634 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 635 * @hw: pointer to the HW struct
 636 * @blk: classification stage
 637 * @dir: flow direction
 638 * @prof_id: unique ID to identify this flow profile
 639 * @segs: array of one or more packet segments that describe the flow
 640 * @segs_cnt: number of packet segments provided
 641 * @prof: stores the returned flow profile added
 642 *
 643 * Assumption: the caller has acquired the lock to the profile list
 644 */
 645static enum ice_status
 646ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
 647		       enum ice_flow_dir dir, u64 prof_id,
 648		       struct ice_flow_seg_info *segs, u8 segs_cnt,
 649		       struct ice_flow_prof **prof)
 650{
 651	struct ice_flow_prof_params params;
 652	enum ice_status status;
 653	u8 i;
 654
 655	if (!prof)
 656		return ICE_ERR_BAD_PTR;
 657
 658	memset(&params, 0, sizeof(params));
 659	params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
 660				   GFP_KERNEL);
 661	if (!params.prof)
 662		return ICE_ERR_NO_MEMORY;
 663
 664	/* initialize extraction sequence to all invalid (0xff) */
 665	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
 666		params.es[i].prot_id = ICE_PROT_INVALID;
 667		params.es[i].off = ICE_FV_OFFSET_INVAL;
 668	}
 669
 670	params.blk = blk;
 671	params.prof->id = prof_id;
 672	params.prof->dir = dir;
 673	params.prof->segs_cnt = segs_cnt;
 674
 675	/* Make a copy of the segments that need to be persistent in the flow
 676	 * profile instance
 677	 */
 678	for (i = 0; i < segs_cnt; i++)
 679		memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
 680
 681	status = ice_flow_proc_segs(hw, &params);
 682	if (status) {
 683		ice_debug(hw, ICE_DBG_FLOW,
 684			  "Error processing a flow's packet segments\n");
 685		goto out;
 686	}
 687
 688	/* Add a HW profile for this flow profile */
 689	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
 690	if (status) {
 691		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
 692		goto out;
 693	}
 694
 695	INIT_LIST_HEAD(&params.prof->entries);
 696	mutex_init(&params.prof->entries_lock);
 697	*prof = params.prof;
 698
 699out:
 700	if (status)
 701		devm_kfree(ice_hw_to_dev(hw), params.prof);
 702
 703	return status;
 704}
 705
 706/**
 707 * ice_flow_rem_prof_sync - remove a flow profile
 708 * @hw: pointer to the hardware structure
 709 * @blk: classification stage
 710 * @prof: pointer to flow profile to remove
 711 *
 712 * Assumption: the caller has acquired the lock to the profile list
 713 */
 714static enum ice_status
 715ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
 716		       struct ice_flow_prof *prof)
 717{
 718	enum ice_status status;
 719
 720	/* Remove all remaining flow entries before removing the flow profile */
 721	if (!list_empty(&prof->entries)) {
 722		struct ice_flow_entry *e, *t;
 723
 724		mutex_lock(&prof->entries_lock);
 725
 726		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
 727			status = ice_flow_rem_entry_sync(hw, blk, e);
 728			if (status)
 729				break;
 730		}
 731
 732		mutex_unlock(&prof->entries_lock);
 733	}
 734
 735	/* Remove all hardware profiles associated with this flow profile */
 736	status = ice_rem_prof(hw, blk, prof->id);
 737	if (!status) {
 738		list_del(&prof->l_entry);
 739		mutex_destroy(&prof->entries_lock);
 740		devm_kfree(ice_hw_to_dev(hw), prof);
 741	}
 742
 743	return status;
 744}
 745
 746/**
 747 * ice_flow_assoc_prof - associate a VSI with a flow profile
 748 * @hw: pointer to the hardware structure
 749 * @blk: classification stage
 750 * @prof: pointer to flow profile
 751 * @vsi_handle: software VSI handle
 752 *
 753 * Assumption: the caller has acquired the lock to the profile list
 754 * and the software VSI handle has been validated
 755 */
 756static enum ice_status
 757ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
 758		    struct ice_flow_prof *prof, u16 vsi_handle)
 759{
 760	enum ice_status status = 0;
 761
 762	if (!test_bit(vsi_handle, prof->vsis)) {
 763		status = ice_add_prof_id_flow(hw, blk,
 764					      ice_get_hw_vsi_num(hw,
 765								 vsi_handle),
 766					      prof->id);
 767		if (!status)
 768			set_bit(vsi_handle, prof->vsis);
 769		else
 770			ice_debug(hw, ICE_DBG_FLOW,
 771				  "HW profile add failed, %d\n",
 772				  status);
 773	}
 774
 775	return status;
 776}
 777
 778/**
 779 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 780 * @hw: pointer to the hardware structure
 781 * @blk: classification stage
 782 * @prof: pointer to flow profile
 783 * @vsi_handle: software VSI handle
 784 *
 785 * Assumption: the caller has acquired the lock to the profile list
 786 * and the software VSI handle has been validated
 787 */
 788static enum ice_status
 789ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
 790		       struct ice_flow_prof *prof, u16 vsi_handle)
 791{
 792	enum ice_status status = 0;
 793
 794	if (test_bit(vsi_handle, prof->vsis)) {
 795		status = ice_rem_prof_id_flow(hw, blk,
 796					      ice_get_hw_vsi_num(hw,
 797								 vsi_handle),
 798					      prof->id);
 799		if (!status)
 800			clear_bit(vsi_handle, prof->vsis);
 801		else
 802			ice_debug(hw, ICE_DBG_FLOW,
 803				  "HW profile remove failed, %d\n",
 804				  status);
 805	}
 806
 807	return status;
 808}
 809
 810/**
 811 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 812 * @hw: pointer to the HW struct
 813 * @blk: classification stage
 814 * @dir: flow direction
 815 * @prof_id: unique ID to identify this flow profile
 816 * @segs: array of one or more packet segments that describe the flow
 817 * @segs_cnt: number of packet segments provided
 818 * @prof: stores the returned flow profile added
 819 */
 820enum ice_status
 821ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
 822		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
 823		  struct ice_flow_prof **prof)
 824{
 825	enum ice_status status;
 826
 827	if (segs_cnt > ICE_FLOW_SEG_MAX)
 828		return ICE_ERR_MAX_LIMIT;
 829
 830	if (!segs_cnt)
 831		return ICE_ERR_PARAM;
 832
 833	if (!segs)
 834		return ICE_ERR_BAD_PTR;
 835
 836	status = ice_flow_val_hdrs(segs, segs_cnt);
 837	if (status)
 838		return status;
 839
 840	mutex_lock(&hw->fl_profs_locks[blk]);
 841
 842	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
 843					prof);
 844	if (!status)
 845		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
 846
 847	mutex_unlock(&hw->fl_profs_locks[blk]);
 848
 849	return status;
 850}
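/* Usage sketch (editorial, illustrative only; hw, the profile ID and the
 * block choice are placeholders, not values taken from the driver): build a
 * one-segment profile that matches on the IPv4 source address and the TCP
 * source port, then register it.
 *
 *	struct ice_flow_seg_info seg = {};
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1ULL,
 *				   &seg, 1, &prof);
 */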
 851
 852/**
 853 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 854 * @hw: pointer to the HW struct
 855 * @blk: the block for which the flow profile is to be removed
 856 * @prof_id: unique ID of the flow profile to be removed
 857 */
 858enum ice_status
 859ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
 860{
 861	struct ice_flow_prof *prof;
 862	enum ice_status status;
 863
 864	mutex_lock(&hw->fl_profs_locks[blk]);
 865
 866	prof = ice_flow_find_prof_id(hw, blk, prof_id);
 867	if (!prof) {
 868		status = ICE_ERR_DOES_NOT_EXIST;
 869		goto out;
 870	}
 871
 872	/* prof becomes invalid after the call */
 873	status = ice_flow_rem_prof_sync(hw, blk, prof);
 874
 875out:
 876	mutex_unlock(&hw->fl_profs_locks[blk]);
 877
 878	return status;
 879}
 880
 881/**
 882 * ice_flow_add_entry - Add a flow entry
 883 * @hw: pointer to the HW struct
 884 * @blk: classification stage
 885 * @prof_id: ID of the profile to add a new flow entry to
 886 * @entry_id: unique ID to identify this flow entry
 887 * @vsi_handle: software VSI handle for the flow entry
 888 * @prio: priority of the flow entry
 889 * @data: pointer to a data buffer containing flow entry's match values/masks
 890 * @entry_h: pointer to buffer that receives the new flow entry's handle
 891 */
 892enum ice_status
 893ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 894		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
 895		   void *data, u64 *entry_h)
 896{
 897	struct ice_flow_entry *e = NULL;
 898	struct ice_flow_prof *prof;
 899	enum ice_status status;
 900
 901	/* No flow entry data is expected for RSS */
 902	if (!entry_h || (!data && blk != ICE_BLK_RSS))
 903		return ICE_ERR_BAD_PTR;
 904
 905	if (!ice_is_vsi_valid(hw, vsi_handle))
 906		return ICE_ERR_PARAM;
 907
 908	mutex_lock(&hw->fl_profs_locks[blk]);
 909
 910	prof = ice_flow_find_prof_id(hw, blk, prof_id);
 911	if (!prof) {
 912		status = ICE_ERR_DOES_NOT_EXIST;
 913	} else {
 914		/* Allocate memory for the entry being added and associate
 915		 * the VSI with the found flow profile
 916		 */
 917		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
 918		if (!e)
 919			status = ICE_ERR_NO_MEMORY;
 920		else
 921			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
 922	}
 923
 924	mutex_unlock(&hw->fl_profs_locks[blk]);
 925	if (status)
 926		goto out;
 927
 928	e->id = entry_id;
 929	e->vsi_handle = vsi_handle;
 930	e->prof = prof;
 931	e->priority = prio;
 932
 933	switch (blk) {
 934	case ICE_BLK_FD:
 935	case ICE_BLK_RSS:
 936		break;
 937	default:
 938		status = ICE_ERR_NOT_IMPL;
 939		goto out;
 940	}
 941
 942	mutex_lock(&prof->entries_lock);
 943	list_add(&e->l_entry, &prof->entries);
 944	mutex_unlock(&prof->entries_lock);
 945
 946	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
 947
 948out:
 949	if (status && e) {
 950		if (e->entry)
 951			devm_kfree(ice_hw_to_dev(hw), e->entry);
 952		devm_kfree(ice_hw_to_dev(hw), e);
 953	}
 954
 955	return status;
 956}
 957
 958/**
 959 * ice_flow_rem_entry - Remove a flow entry
 960 * @hw: pointer to the HW struct
 961 * @blk: classification stage
 962 * @entry_h: handle to the flow entry to be removed
 963 */
 964enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
 965				   u64 entry_h)
 966{
 967	struct ice_flow_entry *entry;
 968	struct ice_flow_prof *prof;
 969	enum ice_status status = 0;
 970
 971	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
 972		return ICE_ERR_PARAM;
 973
 974	entry = ICE_FLOW_ENTRY_PTR(entry_h);
 975
 976	/* Retain the pointer to the flow profile as the entry will be freed */
 977	prof = entry->prof;
 978
 979	if (prof) {
 980		mutex_lock(&prof->entries_lock);
 981		status = ice_flow_rem_entry_sync(hw, blk, entry);
 982		mutex_unlock(&prof->entries_lock);
 983	}
 984
 985	return status;
 986}
 987
 988/**
 989 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 990 * @seg: packet segment the field being set belongs to
 991 * @fld: field to be set
 992 * @field_type: type of the field
 993 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 994 *           entry's input buffer
 995 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 996 *            input buffer
 997 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 998 *            entry's input buffer
 999 *
1000 * This helper function stores information about a field being matched, including
1001 * the type of the field and the locations of the value to match, the mask,
1002 * and the upper-bound value at the start of the input buffer for a flow entry.
1003 * This function should only be used for fixed-size data structures.
1004 *
1005 * This function also opportunistically determines the protocol headers to be
1006 * present based on the fields being set. Some fields cannot be used alone to
1007 * determine the protocol headers present. Sometimes, fields for particular
1008 * protocol headers are not matched. In those cases, the protocol headers
1009 * must be explicitly set.
1010 */
1011static void
1012ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1013		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1014		     u16 mask_loc, u16 last_loc)
1015{
1016	u64 bit = BIT_ULL(fld);
1017
1018	seg->match |= bit;
1019	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1020		seg->range |= bit;
1021
1022	seg->fields[fld].type = field_type;
1023	seg->fields[fld].src.val = val_loc;
1024	seg->fields[fld].src.mask = mask_loc;
1025	seg->fields[fld].src.last = last_loc;
1026
1027	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1028}
1029
1030/**
1031 * ice_flow_set_fld - specifies locations of field from entry's input buffer
1032 * @seg: packet segment the field being set belongs to
1033 * @fld: field to be set
1034 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1035 *           entry's input buffer
1036 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1037 *            input buffer
1038 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1039 *            entry's input buffer
1040 * @range: indicate if field being matched is to be in a range
1041 *
1042 * This function specifies the locations, in the form of byte offsets from the
1043 * start of the input buffer for a flow entry, from where the value to match,
1044 * the mask value, and upper value can be extracted. These locations are then
1045 * stored in the flow profile. When adding a flow entry associated with the
1046 * flow profile, these locations will be used to quickly extract the values and
1047 * create the content of a match entry. This function should only be used for
1048 * fixed-size data structures.
1049 */
1050void
1051ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1052		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1053{
1054	enum ice_flow_fld_match_type t = range ?
1055		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1056
1057	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1058}
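/* Editorial note with a small illustration: for RSS-style configurations the
 * callers below pass ICE_FLOW_FLD_OFF_INVAL for all three locations, which
 * only marks the field as hashed. When a flow entry does carry match data,
 * the locations are real byte offsets into that entry's input buffer, e.g.
 * (hypothetical layout with the IPv4 destination address at offset 4):
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 4,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 false);
 */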
1059
1060/**
1061 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1062 * @seg: packet segment the field being set belongs to
1063 * @off: offset of the raw field from the beginning of the segment in bytes
1064 * @len: length of the raw pattern to be matched
1065 * @val_loc: location of the value to match from entry's input buffer
1066 * @mask_loc: location of mask value from entry's input buffer
1067 *
1068 * This function specifies the offset of the raw field to be matched from the
1069 * beginning of the specified packet segment, and the locations, in the form of
1070 * byte offsets from the start of the input buffer for a flow entry, from which
1071 * the value to match and the mask value are to be extracted. These locations are
1072 * then stored in the flow profile. When adding flow entries to the associated
1073 * flow profile, these locations can be used to quickly extract the values to
1074 * create the content of a match entry. This function should only be used for
1075 * fixed-size data structures.
1076 */
1077void
1078ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1079		     u16 val_loc, u16 mask_loc)
1080{
1081	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1082		seg->raws[seg->raws_cnt].off = off;
1083		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1084		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1085		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1086		/* The "last" field is used to store the length of the field */
1087		seg->raws[seg->raws_cnt].info.src.last = len;
1088	}
1089
1090	/* Overflows of "raws" will be handled as an error condition later in
1091	 * the flow when this information is processed.
1092	 */
1093	seg->raws_cnt++;
1094}
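/* Illustrative example (editorial): match a 4-byte raw pattern that starts
 * 6 bytes into the segment, with the value and mask stored at hypothetical
 * offsets 0 and 4 of the entry's input buffer:
 *
 *	ice_flow_add_fld_raw(&seg, 6, 4, 0, 4);
 */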
1095
1096#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1097	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1098
1099#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1100	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1101
1102#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1103	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1104	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1105
1106/**
1107 * ice_flow_set_rss_seg_info - setup packet segments for RSS
1108 * @segs: pointer to the flow field segment(s)
1109 * @hash_fields: fields to be hashed on for the segment(s)
1110 * @flow_hdr: protocol header fields within a packet segment
1111 *
1112 * Helper function to extract fields from the hash bitmap and use the flow
1113 * header value to set up the flow field segment for later use when adding
1114 * or removing a flow profile entry.
1115 */
1116static enum ice_status
1117ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1118			  u32 flow_hdr)
1119{
1120	u64 val;
1121	u8 i;
1122
1123	for_each_set_bit(i, (unsigned long *)&hash_fields,
1124			 ICE_FLOW_FIELD_IDX_MAX)
1125		ice_flow_set_fld(segs, (enum ice_flow_field)i,
1126				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1127				 ICE_FLOW_FLD_OFF_INVAL, false);
1128
1129	ICE_FLOW_SET_HDRS(segs, flow_hdr);
1130
1131	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1132		return ICE_ERR_PARAM;
1133
1134	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1135	if (val && !is_power_of_2(val))
1136		return ICE_ERR_CFG;
1137
1138	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1139	if (val && !is_power_of_2(val))
1140		return ICE_ERR_CFG;
1141
1142	return 0;
1143}
1144
1145/**
1146 * ice_rem_vsi_rss_list - remove VSI from RSS list
1147 * @hw: pointer to the hardware structure
1148 * @vsi_handle: software VSI handle
1149 *
1150 * Remove the VSI from all RSS configurations in the list.
1151 */
1152void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1153{
1154	struct ice_rss_cfg *r, *tmp;
1155
1156	if (list_empty(&hw->rss_list_head))
1157		return;
1158
1159	mutex_lock(&hw->rss_locks);
1160	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1161		if (test_and_clear_bit(vsi_handle, r->vsis))
1162			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1163				list_del(&r->l_entry);
1164				devm_kfree(ice_hw_to_dev(hw), r);
1165			}
1166	mutex_unlock(&hw->rss_locks);
1167}
1168
1169/**
1170 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1171 * @hw: pointer to the hardware structure
1172 * @vsi_handle: software VSI handle
1173 *
1174 * This function will iterate through all flow profiles and disassociate
1175 * the VSI from each profile it is associated with. If a flow profile is then
1176 * left with no VSIs, it will be removed.
1177 */
1178enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1179{
1180	const enum ice_block blk = ICE_BLK_RSS;
1181	struct ice_flow_prof *p, *t;
1182	enum ice_status status = 0;
1183
1184	if (!ice_is_vsi_valid(hw, vsi_handle))
1185		return ICE_ERR_PARAM;
1186
1187	if (list_empty(&hw->fl_profs[blk]))
1188		return 0;
1189
1190	mutex_lock(&hw->rss_locks);
1191	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1192		if (test_bit(vsi_handle, p->vsis)) {
1193			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1194			if (status)
1195				break;
1196
1197			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1198				status = ice_flow_rem_prof(hw, blk, p->id);
1199				if (status)
1200					break;
1201			}
1202		}
1203	mutex_unlock(&hw->rss_locks);
1204
1205	return status;
1206}
1207
1208/**
1209 * ice_rem_rss_list - remove RSS configuration from list
1210 * @hw: pointer to the hardware structure
1211 * @vsi_handle: software VSI handle
1212 * @prof: pointer to flow profile
1213 *
1214 * Assumption: lock has already been acquired for RSS list
1215 */
1216static void
1217ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1218{
1219	struct ice_rss_cfg *r, *tmp;
1220
1221	/* Search for RSS hash fields associated with the VSI that match the
1222	 * hash configurations associated with the flow profile. If found,
1223	 * clear the VSI from that RSS entry and delete the entry if no VSIs remain.
1224	 */
1225	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1226		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1227		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1228			clear_bit(vsi_handle, r->vsis);
1229			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1230				list_del(&r->l_entry);
1231				devm_kfree(ice_hw_to_dev(hw), r);
1232			}
1233			return;
1234		}
1235}
1236
1237/**
1238 * ice_add_rss_list - add RSS configuration to list
1239 * @hw: pointer to the hardware structure
1240 * @vsi_handle: software VSI handle
1241 * @prof: pointer to flow profile
1242 *
1243 * Assumption: lock has already been acquired for RSS list
1244 */
1245static enum ice_status
1246ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1247{
1248	struct ice_rss_cfg *r, *rss_cfg;
1249
1250	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1251		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1252		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1253			set_bit(vsi_handle, r->vsis);
1254			return 0;
1255		}
1256
1257	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1258			       GFP_KERNEL);
1259	if (!rss_cfg)
1260		return ICE_ERR_NO_MEMORY;
1261
1262	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1263	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1264	set_bit(vsi_handle, rss_cfg->vsis);
1265
1266	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1267
1268	return 0;
1269}
1270
1271#define ICE_FLOW_PROF_HASH_S	0
1272#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1273#define ICE_FLOW_PROF_HDR_S	32
1274#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1275#define ICE_FLOW_PROF_ENCAP_S	63
1276#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
1277
1278#define ICE_RSS_OUTER_HEADERS	1
1279#define ICE_RSS_INNER_HEADERS	2
1280
1281/* Flow profile ID format:
1282 * [0:31] - Packet match fields
1283 * [32:62] - Protocol header
1284 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
1285 */
1286#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
1287	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1288	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
1289	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
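/* Worked example (editorial): for hashed_flds = ICE_FLOW_HASH_IPV4,
 * addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 and segs_cnt = 1, the macro keeps the
 * low 32 bits of the hash, shifts the header bitmap up by
 * ICE_FLOW_PROF_HDR_S, and leaves the encapsulation bit clear because
 * (segs_cnt - 1) == 0, i.e. the configuration is non-tunneled.
 */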
1290
1291/**
1292 * ice_add_rss_cfg_sync - add an RSS configuration
1293 * @hw: pointer to the hardware structure
1294 * @vsi_handle: software VSI handle
1295 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1296 * @addl_hdrs: protocol header fields
1297 * @segs_cnt: packet segment count
1298 *
1299 * Assumption: lock has already been acquired for RSS list
1300 */
1301static enum ice_status
1302ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1303		     u32 addl_hdrs, u8 segs_cnt)
1304{
1305	const enum ice_block blk = ICE_BLK_RSS;
1306	struct ice_flow_prof *prof = NULL;
1307	struct ice_flow_seg_info *segs;
1308	enum ice_status status;
1309
1310	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1311		return ICE_ERR_PARAM;
1312
1313	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1314	if (!segs)
1315		return ICE_ERR_NO_MEMORY;
1316
1317	/* Construct the packet segment info from the hashed fields */
1318	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1319					   addl_hdrs);
1320	if (status)
1321		goto exit;
1322
1323	/* Search for a flow profile that has matching headers, hash fields
1324	 * and has the input VSI associated with it. If found, no further
1325	 * operations are required, so exit.
1326	 */
1327	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1328					vsi_handle,
1329					ICE_FLOW_FIND_PROF_CHK_FLDS |
1330					ICE_FLOW_FIND_PROF_CHK_VSI);
1331	if (prof)
1332		goto exit;
1333
1334	/* Check if a flow profile exists with the same protocol headers and
1335	 * associated with the input VSI. If so, disassociate the VSI from
1336	 * this profile. The VSI will be added to a new profile created with
1337	 * the protocol header and new hash field configuration.
1338	 */
1339	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1340					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1341	if (prof) {
1342		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1343		if (!status)
1344			ice_rem_rss_list(hw, vsi_handle, prof);
1345		else
1346			goto exit;
1347
1348		/* Remove profile if it has no VSIs associated */
1349		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1350			status = ice_flow_rem_prof(hw, blk, prof->id);
1351			if (status)
1352				goto exit;
1353		}
1354	}
1355
1356	/* Search for a profile that has the same match fields only. If one
1357	 * exists, associate the VSI with this profile.
1358	 */
1359	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1360					vsi_handle,
1361					ICE_FLOW_FIND_PROF_CHK_FLDS);
1362	if (prof) {
1363		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1364		if (!status)
1365			status = ice_add_rss_list(hw, vsi_handle, prof);
1366		goto exit;
1367	}
1368
1369	/* Create a new flow profile with generated profile and packet
1370	 * segment information.
1371	 */
1372	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1373				   ICE_FLOW_GEN_PROFID(hashed_flds,
1374						       segs[segs_cnt - 1].hdrs,
1375						       segs_cnt),
1376				   segs, segs_cnt, &prof);
1377	if (status)
1378		goto exit;
1379
1380	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1381	/* If association with the new flow profile failed, the profile can
1382	 * be removed.
1383	 */
1384	if (status) {
1385		ice_flow_rem_prof(hw, blk, prof->id);
1386		goto exit;
1387	}
1388
1389	status = ice_add_rss_list(hw, vsi_handle, prof);
1390
1391exit:
1392	kfree(segs);
1393	return status;
1394}
1395
1396/**
1397 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1398 * @hw: pointer to the hardware structure
1399 * @vsi_handle: software VSI handle
1400 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1401 * @addl_hdrs: protocol header fields
1402 *
1403 * This function will generate a flow profile based on the fields to hash on
1404 * and the protocol headers provided, and will then use the VSI number to
1405 * associate the VSI with the resulting profile.
1406 */
1407enum ice_status
1408ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1409		u32 addl_hdrs)
1410{
1411	enum ice_status status;
1412
1413	if (hashed_flds == ICE_HASH_INVALID ||
1414	    !ice_is_vsi_valid(hw, vsi_handle))
1415		return ICE_ERR_PARAM;
1416
1417	mutex_lock(&hw->rss_locks);
1418	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1419				      ICE_RSS_OUTER_HEADERS);
1420	if (!status)
1421		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1422					      addl_hdrs, ICE_RSS_INNER_HEADERS);
1423	mutex_unlock(&hw->rss_locks);
1424
1425	return status;
1426}
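/* Usage sketch (editorial, illustrative only; hw and vsi_handle are
 * placeholders): hash TCP/IPv4 traffic on addresses and ports for a VSI.
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle,
 *				 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */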
1427
1428/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
1429 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
1430 * convert its values to their appropriate flow L3, L4 values.
1431 */
1432#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
1433	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
1434	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
1435#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
1436	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
1437	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
1438#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
1439	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
1440	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
1441	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
1442#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
1443	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
1444	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
1445
1446#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
1447	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
1448	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
1449#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
1450	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
1451	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
1452	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
1453#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
1454	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
1455	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
1456#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
1457	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
1458	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1459
1460/**
1461 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1462 * @hw: pointer to the hardware structure
1463 * @vsi_handle: software VSI handle
1464 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1465 *
1466 * This function will take the hash bitmap provided by the AVF driver via a
1467 * message, convert it to ICE-compatible values, and configure RSS flow
1468 * profiles.
1469 */
1470enum ice_status
1471ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1472{
1473	enum ice_status status = 0;
1474	u64 hash_flds;
1475
1476	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1477	    !ice_is_vsi_valid(hw, vsi_handle))
1478		return ICE_ERR_PARAM;
1479
1480	/* Make sure no unsupported bits are specified */
1481	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1482			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1483		return ICE_ERR_CFG;
1484
1485	hash_flds = avf_hash;
1486
1487	/* Always create an L3 RSS configuration for any L4 RSS configuration */
1488	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1489		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1490
1491	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1492		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1493
1494	/* Create the corresponding RSS configuration for each valid hash bit */
1495	while (hash_flds) {
1496		u64 rss_hash = ICE_HASH_INVALID;
1497
1498		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1499			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1500				rss_hash = ICE_FLOW_HASH_IPV4;
1501				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1502			} else if (hash_flds &
1503				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1504				rss_hash = ICE_FLOW_HASH_IPV4 |
1505					ICE_FLOW_HASH_TCP_PORT;
1506				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1507			} else if (hash_flds &
1508				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1509				rss_hash = ICE_FLOW_HASH_IPV4 |
1510					ICE_FLOW_HASH_UDP_PORT;
1511				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1512			} else if (hash_flds &
1513				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1514				rss_hash = ICE_FLOW_HASH_IPV4 |
1515					ICE_FLOW_HASH_SCTP_PORT;
1516				hash_flds &=
1517					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1518			}
1519		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1520			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1521				rss_hash = ICE_FLOW_HASH_IPV6;
1522				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1523			} else if (hash_flds &
1524				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1525				rss_hash = ICE_FLOW_HASH_IPV6 |
1526					ICE_FLOW_HASH_TCP_PORT;
1527				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1528			} else if (hash_flds &
1529				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1530				rss_hash = ICE_FLOW_HASH_IPV6 |
1531					ICE_FLOW_HASH_UDP_PORT;
1532				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1533			} else if (hash_flds &
1534				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1535				rss_hash = ICE_FLOW_HASH_IPV6 |
1536					ICE_FLOW_HASH_SCTP_PORT;
1537				hash_flds &=
1538					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1539			}
1540		}
1541
1542		if (rss_hash == ICE_HASH_INVALID)
1543			return ICE_ERR_OUT_OF_RANGE;
1544
1545		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1546					 ICE_FLOW_SEG_HDR_NONE);
1547		if (status)
1548			break;
1549	}
1550
1551	return status;
1552}
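/* Walk-through (editorial): if the AVF driver requests only
 * BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP), the code above first ORs in
 * ICE_FLOW_AVF_RSS_IPV4_MASKS so that an L3 configuration always accompanies
 * the L4 one. The loop then calls ice_add_rss_cfg() with ICE_FLOW_HASH_IPV4
 * on its first pass and with ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT on
 * its second, after which hash_flds is empty and the loop exits.
 */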
1553
1554/**
1555 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1556 * @hw: pointer to the hardware structure
1557 * @vsi_handle: software VSI handle
1558 */
1559enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1560{
1561	enum ice_status status = 0;
1562	struct ice_rss_cfg *r;
1563
1564	if (!ice_is_vsi_valid(hw, vsi_handle))
1565		return ICE_ERR_PARAM;
1566
1567	mutex_lock(&hw->rss_locks);
1568	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1569		if (test_bit(vsi_handle, r->vsis)) {
1570			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1571						      r->hashed_flds,
1572						      r->packet_hdr,
1573						      ICE_RSS_OUTER_HEADERS);
1574			if (status)
1575				break;
1576			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1577						      r->hashed_flds,
1578						      r->packet_hdr,
1579						      ICE_RSS_INNER_HEADERS);
1580			if (status)
1581				break;
1582		}
1583	}
1584	mutex_unlock(&hw->rss_locks);
1585
1586	return status;
1587}
1588
1589/**
1590 * ice_get_rss_cfg - returns hashed fields for the given header types
1591 * @hw: pointer to the hardware structure
1592 * @vsi_handle: software VSI handle
1593 * @hdrs: protocol header type
1594 *
1595 * This function will return the match fields of the first instance of a flow
1596 * profile having the given header types and containing the input VSI.
1597 */
1598u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1599{
1600	u64 rss_hash = ICE_HASH_INVALID;
1601	struct ice_rss_cfg *r;
1602
1603	/* verify that the protocol header is non-zero and the VSI is valid */
1604	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1605		return ICE_HASH_INVALID;
1606
1607	mutex_lock(&hw->rss_locks);
1608	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1609		if (test_bit(vsi_handle, r->vsis) &&
1610		    r->packet_hdr == hdrs) {
1611			rss_hash = r->hashed_flds;
1612			break;
1613		}
1614	mutex_unlock(&hw->rss_locks);
1615
1616	return rss_hash;
1617}