1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_flow.h" 6 7 /* Describe properties of a protocol header field */ 8 struct ice_flow_field_info { 9 enum ice_flow_seg_hdr hdr; 10 s16 off; /* Offset from start of a protocol header, in bits */ 11 u16 size; /* Size of fields in bits */ 12 u16 mask; /* 16-bit mask for field */ 13 }; 14 15 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ 16 .hdr = _hdr, \ 17 .off = (_offset_bytes) * BITS_PER_BYTE, \ 18 .size = (_size_bytes) * BITS_PER_BYTE, \ 19 .mask = 0, \ 20 } 21 22 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ 23 .hdr = _hdr, \ 24 .off = (_offset_bytes) * BITS_PER_BYTE, \ 25 .size = (_size_bytes) * BITS_PER_BYTE, \ 26 .mask = _mask, \ 27 } 28 29 /* Table containing properties of supported protocol header fields */ 30 static const 31 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { 32 /* Ether */ 33 /* ICE_FLOW_FIELD_IDX_ETH_DA */ 34 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), 35 /* ICE_FLOW_FIELD_IDX_ETH_SA */ 36 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), 37 /* ICE_FLOW_FIELD_IDX_S_VLAN */ 38 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)), 39 /* ICE_FLOW_FIELD_IDX_C_VLAN */ 40 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)), 41 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ 42 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)), 43 /* IPv4 / IPv6 */ 44 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ 45 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc), 46 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ 47 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0), 48 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ 49 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00), 50 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ 51 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff), 52 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ 53 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 
6, 1, 0x00ff), 54 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ 55 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00), 56 /* ICE_FLOW_FIELD_IDX_IPV4_SA */ 57 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)), 58 /* ICE_FLOW_FIELD_IDX_IPV4_DA */ 59 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)), 60 /* ICE_FLOW_FIELD_IDX_IPV6_SA */ 61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)), 62 /* ICE_FLOW_FIELD_IDX_IPV6_DA */ 63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)), 64 /* Transport */ 65 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ 66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), 67 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */ 68 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)), 69 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */ 70 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)), 71 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */ 72 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)), 73 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */ 74 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)), 75 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ 76 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), 77 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ 78 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), 79 /* ARP */ 80 /* ICE_FLOW_FIELD_IDX_ARP_SIP */ 81 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), 82 /* ICE_FLOW_FIELD_IDX_ARP_DIP */ 83 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)), 84 /* ICE_FLOW_FIELD_IDX_ARP_SHA */ 85 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN), 86 /* ICE_FLOW_FIELD_IDX_ARP_DHA */ 87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN), 88 /* ICE_FLOW_FIELD_IDX_ARP_OP */ 89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)), 90 /* ICMP */ 91 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */ 92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1), 93 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */ 94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1), 95 
/* GRE */ 96 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ 97 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, 98 sizeof_field(struct gre_full_hdr, key)), 99 /* GTP */ 100 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */ 101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)), 102 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */ 103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)), 104 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */ 105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)), 106 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */ 107 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), 108 0x3f00), 109 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ 110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), 111 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ 112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), 113 /* PPPoE */ 114 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ 115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), 116 /* PFCP */ 117 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */ 118 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)), 119 /* L2TPv3 */ 120 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */ 121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)), 122 /* ESP */ 123 /* ICE_FLOW_FIELD_IDX_ESP_SPI */ 124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)), 125 /* AH */ 126 /* ICE_FLOW_FIELD_IDX_AH_SPI */ 127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), 128 /* NAT_T_ESP */ 129 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ 130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), 131 }; 132 133 /* Bitmaps indicating relevant packet types for a particular protocol header 134 * 135 * Packet types for packets with an Outer/First/Single MAC header 136 */ 137 static const u32 ice_ptypes_mac_ofos[] = { 138 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, 139 0x0000077E, 0x00000000, 0x00000000, 0x00000000, 140 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, 141 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 142 0x00000000, 0x00000000, 0x00000000, 0x00000000, 143 0x00000000, 0x00000000, 0x00000000, 0x00000000, 144 0x00000000, 0x00000000, 0x00000000, 0x00000000, 145 0x00000000, 0x00000000, 0x00000000, 0x00000000, 146 }; 147 148 /* Packet types for packets with an Innermost/Last MAC VLAN header */ 149 static const u32 ice_ptypes_macvlan_il[] = { 150 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000, 151 0x0000077E, 0x00000000, 0x00000000, 0x00000000, 152 0x00000000, 0x00000000, 0x00000000, 0x00000000, 153 0x00000000, 0x00000000, 0x00000000, 0x00000000, 154 0x00000000, 0x00000000, 0x00000000, 0x00000000, 155 0x00000000, 0x00000000, 0x00000000, 0x00000000, 156 0x00000000, 0x00000000, 0x00000000, 0x00000000, 157 0x00000000, 0x00000000, 0x00000000, 0x00000000, 158 }; 159 160 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT 161 * include IPv4 other PTYPEs 162 */ 163 static const u32 ice_ptypes_ipv4_ofos[] = { 164 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 165 0x00000000, 0x00000155, 0x00000000, 0x00000000, 166 0x00000000, 0x000FC000, 0x00000000, 0x00000000, 167 0x00000000, 0x00000000, 0x00000000, 0x00000000, 168 0x00000000, 0x00000000, 0x00000000, 0x00000000, 169 0x00000000, 0x00000000, 0x00000000, 0x00000000, 170 0x00000000, 0x00000000, 0x00000000, 0x00000000, 171 0x00000000, 0x00000000, 0x00000000, 0x00000000, 172 }; 173 174 /* Packet types for packets with an Outer/First/Single IPv4 header, includes 175 * IPv4 other PTYPEs 176 */ 177 static const u32 ice_ptypes_ipv4_ofos_all[] = { 178 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 179 0x00000000, 0x00000155, 0x00000000, 0x00000000, 180 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, 181 0x00000000, 0x00000000, 0x00000000, 0x00000000, 182 0x00000000, 0x00000000, 0x00000000, 0x00000000, 183 0x00000000, 0x00000000, 0x00000000, 0x00000000, 184 0x00000000, 0x00000000, 0x00000000, 0x00000000, 185 0x00000000, 0x00000000, 0x00000000, 0x00000000, 186 }; 187 188 /* Packet types for packets 
with an Innermost/Last IPv4 header */ 189 static const u32 ice_ptypes_ipv4_il[] = { 190 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 191 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 192 0x00000000, 0x00000000, 0x001FF800, 0x00000000, 193 0x00000000, 0x00000000, 0x00000000, 0x00000000, 194 0x00000000, 0x00000000, 0x00000000, 0x00000000, 195 0x00000000, 0x00000000, 0x00000000, 0x00000000, 196 0x00000000, 0x00000000, 0x00000000, 0x00000000, 197 0x00000000, 0x00000000, 0x00000000, 0x00000000, 198 }; 199 200 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT 201 * include IPv6 other PTYPEs 202 */ 203 static const u32 ice_ptypes_ipv6_ofos[] = { 204 0x00000000, 0x00000000, 0x77000000, 0x10002000, 205 0x00000000, 0x000002AA, 0x00000000, 0x00000000, 206 0x00000000, 0x03F00000, 0x00000000, 0x00000000, 207 0x00000000, 0x00000000, 0x00000000, 0x00000000, 208 0x00000000, 0x00000000, 0x00000000, 0x00000000, 209 0x00000000, 0x00000000, 0x00000000, 0x00000000, 210 0x00000000, 0x00000000, 0x00000000, 0x00000000, 211 0x00000000, 0x00000000, 0x00000000, 0x00000000, 212 }; 213 214 /* Packet types for packets with an Outer/First/Single IPv6 header, includes 215 * IPv6 other PTYPEs 216 */ 217 static const u32 ice_ptypes_ipv6_ofos_all[] = { 218 0x00000000, 0x00000000, 0x77000000, 0x10002000, 219 0x00000000, 0x000002AA, 0x00000000, 0x00000000, 220 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, 221 0x00000000, 0x00000000, 0x00000000, 0x00000000, 222 0x00000000, 0x00000000, 0x00000000, 0x00000000, 223 0x00000000, 0x00000000, 0x00000000, 0x00000000, 224 0x00000000, 0x00000000, 0x00000000, 0x00000000, 225 0x00000000, 0x00000000, 0x00000000, 0x00000000, 226 }; 227 228 /* Packet types for packets with an Innermost/Last IPv6 header */ 229 static const u32 ice_ptypes_ipv6_il[] = { 230 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 231 0x00000770, 0x00000000, 0x00000000, 0x00000000, 232 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, 233 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 234 0x00000000, 0x00000000, 0x00000000, 0x00000000, 235 0x00000000, 0x00000000, 0x00000000, 0x00000000, 236 0x00000000, 0x00000000, 0x00000000, 0x00000000, 237 0x00000000, 0x00000000, 0x00000000, 0x00000000, 238 }; 239 240 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */ 241 static const u32 ice_ipv4_ofos_no_l4[] = { 242 0x10C00000, 0x04000800, 0x00000000, 0x00000000, 243 0x00000000, 0x00000000, 0x00000000, 0x00000000, 244 0x00000000, 0x00000000, 0x00000000, 0x00000000, 245 0x00000000, 0x00000000, 0x00000000, 0x00000000, 246 0x00000000, 0x00000000, 0x00000000, 0x00000000, 247 0x00000000, 0x00000000, 0x00000000, 0x00000000, 248 0x00000000, 0x00000000, 0x00000000, 0x00000000, 249 0x00000000, 0x00000000, 0x00000000, 0x00000000, 250 }; 251 252 /* Packet types for packets with an Outermost/First ARP header */ 253 static const u32 ice_ptypes_arp_of[] = { 254 0x00000800, 0x00000000, 0x00000000, 0x00000000, 255 0x00000000, 0x00000000, 0x00000000, 0x00000000, 256 0x00000000, 0x00000000, 0x00000000, 0x00000000, 257 0x00000000, 0x00000000, 0x00000000, 0x00000000, 258 0x00000000, 0x00000000, 0x00000000, 0x00000000, 259 0x00000000, 0x00000000, 0x00000000, 0x00000000, 260 0x00000000, 0x00000000, 0x00000000, 0x00000000, 261 0x00000000, 0x00000000, 0x00000000, 0x00000000, 262 }; 263 264 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */ 265 static const u32 ice_ipv4_il_no_l4[] = { 266 0x60000000, 0x18043008, 0x80000002, 0x6010c021, 267 0x00000008, 0x00000000, 0x00000000, 0x00000000, 268 0x00000000, 0x00000000, 0x00000000, 0x00000000, 269 0x00000000, 0x00000000, 0x00000000, 0x00000000, 270 0x00000000, 0x00000000, 0x00000000, 0x00000000, 271 0x00000000, 0x00000000, 0x00000000, 0x00000000, 272 0x00000000, 0x00000000, 0x00000000, 0x00000000, 273 0x00000000, 0x00000000, 0x00000000, 0x00000000, 274 }; 275 276 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */ 277 static const u32 
ice_ipv6_ofos_no_l4[] = { 278 0x00000000, 0x00000000, 0x43000000, 0x10002000, 279 0x00000000, 0x00000000, 0x00000000, 0x00000000, 280 0x00000000, 0x00000000, 0x00000000, 0x00000000, 281 0x00000000, 0x00000000, 0x00000000, 0x00000000, 282 0x00000000, 0x00000000, 0x00000000, 0x00000000, 283 0x00000000, 0x00000000, 0x00000000, 0x00000000, 284 0x00000000, 0x00000000, 0x00000000, 0x00000000, 285 0x00000000, 0x00000000, 0x00000000, 0x00000000, 286 }; 287 288 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */ 289 static const u32 ice_ipv6_il_no_l4[] = { 290 0x00000000, 0x02180430, 0x0000010c, 0x086010c0, 291 0x00000430, 0x00000000, 0x00000000, 0x00000000, 292 0x00000000, 0x00000000, 0x00000000, 0x00000000, 293 0x00000000, 0x00000000, 0x00000000, 0x00000000, 294 0x00000000, 0x00000000, 0x00000000, 0x00000000, 295 0x00000000, 0x00000000, 0x00000000, 0x00000000, 296 0x00000000, 0x00000000, 0x00000000, 0x00000000, 297 0x00000000, 0x00000000, 0x00000000, 0x00000000, 298 }; 299 300 /* UDP Packet types for non-tunneled packets or tunneled 301 * packets with inner UDP. 
302 */ 303 static const u32 ice_ptypes_udp_il[] = { 304 0x81000000, 0x20204040, 0x04000010, 0x80810102, 305 0x00000040, 0x00000000, 0x00000000, 0x00000000, 306 0x00000000, 0x00410000, 0x90842000, 0x00000007, 307 0x00000000, 0x00000000, 0x00000000, 0x00000000, 308 0x00000000, 0x00000000, 0x00000000, 0x00000000, 309 0x00000000, 0x00000000, 0x00000000, 0x00000000, 310 0x00000000, 0x00000000, 0x00000000, 0x00000000, 311 0x00000000, 0x00000000, 0x00000000, 0x00000000, 312 }; 313 314 /* Packet types for packets with an Innermost/Last TCP header */ 315 static const u32 ice_ptypes_tcp_il[] = { 316 0x04000000, 0x80810102, 0x10000040, 0x02040408, 317 0x00000102, 0x00000000, 0x00000000, 0x00000000, 318 0x00000000, 0x00820000, 0x21084000, 0x00000000, 319 0x00000000, 0x00000000, 0x00000000, 0x00000000, 320 0x00000000, 0x00000000, 0x00000000, 0x00000000, 321 0x00000000, 0x00000000, 0x00000000, 0x00000000, 322 0x00000000, 0x00000000, 0x00000000, 0x00000000, 323 0x00000000, 0x00000000, 0x00000000, 0x00000000, 324 }; 325 326 /* Packet types for packets with an Innermost/Last SCTP header */ 327 static const u32 ice_ptypes_sctp_il[] = { 328 0x08000000, 0x01020204, 0x20000081, 0x04080810, 329 0x00000204, 0x00000000, 0x00000000, 0x00000000, 330 0x00000000, 0x01040000, 0x00000000, 0x00000000, 331 0x00000000, 0x00000000, 0x00000000, 0x00000000, 332 0x00000000, 0x00000000, 0x00000000, 0x00000000, 333 0x00000000, 0x00000000, 0x00000000, 0x00000000, 334 0x00000000, 0x00000000, 0x00000000, 0x00000000, 335 0x00000000, 0x00000000, 0x00000000, 0x00000000, 336 }; 337 338 /* Packet types for packets with an Outermost/First ICMP header */ 339 static const u32 ice_ptypes_icmp_of[] = { 340 0x10000000, 0x00000000, 0x00000000, 0x00000000, 341 0x00000000, 0x00000000, 0x00000000, 0x00000000, 342 0x00000000, 0x00000000, 0x00000000, 0x00000000, 343 0x00000000, 0x00000000, 0x00000000, 0x00000000, 344 0x00000000, 0x00000000, 0x00000000, 0x00000000, 345 0x00000000, 0x00000000, 0x00000000, 0x00000000, 346 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 347 0x00000000, 0x00000000, 0x00000000, 0x00000000, 348 }; 349 350 /* Packet types for packets with an Innermost/Last ICMP header */ 351 static const u32 ice_ptypes_icmp_il[] = { 352 0x00000000, 0x02040408, 0x40000102, 0x08101020, 353 0x00000408, 0x00000000, 0x00000000, 0x00000000, 354 0x00000000, 0x00000000, 0x42108000, 0x00000000, 355 0x00000000, 0x00000000, 0x00000000, 0x00000000, 356 0x00000000, 0x00000000, 0x00000000, 0x00000000, 357 0x00000000, 0x00000000, 0x00000000, 0x00000000, 358 0x00000000, 0x00000000, 0x00000000, 0x00000000, 359 0x00000000, 0x00000000, 0x00000000, 0x00000000, 360 }; 361 362 /* Packet types for packets with an Outermost/First GRE header */ 363 static const u32 ice_ptypes_gre_of[] = { 364 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 365 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 366 0x00000000, 0x00000000, 0x00000000, 0x00000000, 367 0x00000000, 0x00000000, 0x00000000, 0x00000000, 368 0x00000000, 0x00000000, 0x00000000, 0x00000000, 369 0x00000000, 0x00000000, 0x00000000, 0x00000000, 370 0x00000000, 0x00000000, 0x00000000, 0x00000000, 371 0x00000000, 0x00000000, 0x00000000, 0x00000000, 372 }; 373 374 /* Packet types for packets with an Innermost/Last MAC header */ 375 static const u32 ice_ptypes_mac_il[] = { 376 0x00000000, 0x00000000, 0x00000000, 0x00000000, 377 0x00000000, 0x00000000, 0x00000000, 0x00000000, 378 0x00000000, 0x00000000, 0x00000000, 0x00000000, 379 0x00000000, 0x00000000, 0x00000000, 0x00000000, 380 0x00000000, 0x00000000, 0x00000000, 0x00000000, 381 0x00000000, 0x00000000, 0x00000000, 0x00000000, 382 0x00000000, 0x00000000, 0x00000000, 0x00000000, 383 0x00000000, 0x00000000, 0x00000000, 0x00000000, 384 }; 385 386 /* Packet types for GTPC */ 387 static const u32 ice_ptypes_gtpc[] = { 388 0x00000000, 0x00000000, 0x00000000, 0x00000000, 389 0x00000000, 0x00000000, 0x00000000, 0x00000000, 390 0x00000000, 0x00000000, 0x00000180, 0x00000000, 391 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 392 0x00000000, 0x00000000, 0x00000000, 0x00000000, 393 0x00000000, 0x00000000, 0x00000000, 0x00000000, 394 0x00000000, 0x00000000, 0x00000000, 0x00000000, 395 0x00000000, 0x00000000, 0x00000000, 0x00000000, 396 }; 397 398 /* Packet types for GTPC with TEID */ 399 static const u32 ice_ptypes_gtpc_tid[] = { 400 0x00000000, 0x00000000, 0x00000000, 0x00000000, 401 0x00000000, 0x00000000, 0x00000000, 0x00000000, 402 0x00000000, 0x00000000, 0x00000060, 0x00000000, 403 0x00000000, 0x00000000, 0x00000000, 0x00000000, 404 0x00000000, 0x00000000, 0x00000000, 0x00000000, 405 0x00000000, 0x00000000, 0x00000000, 0x00000000, 406 0x00000000, 0x00000000, 0x00000000, 0x00000000, 407 0x00000000, 0x00000000, 0x00000000, 0x00000000, 408 }; 409 410 /* Packet types for GTPU */ 411 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { 412 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, 413 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 414 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 415 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 416 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 417 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, 418 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 419 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 420 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 421 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 422 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, 423 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 424 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 425 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 426 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, 427 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, 428 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, 429 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, 
ICE_PTYPE_ATTR_GTP_PDU_EH }, 430 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, 431 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, 432 }; 433 434 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { 435 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 436 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 437 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 438 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 439 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 440 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 441 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 442 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 443 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 444 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 445 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 446 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 447 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 448 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 449 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 450 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 451 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 452 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 453 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 454 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, 455 }; 456 457 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { 458 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, 459 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 460 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 461 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, 462 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, 463 { 
ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, 464 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 465 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 466 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, 467 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, 468 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, 469 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 470 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 471 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, 472 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, 473 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, 474 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 475 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, 476 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, 477 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, 478 }; 479 480 static const u32 ice_ptypes_gtpu[] = { 481 0x00000000, 0x00000000, 0x00000000, 0x00000000, 482 0x00000000, 0x00000000, 0x00000000, 0x00000000, 483 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000, 484 0x00000000, 0x00000000, 0x00000000, 0x00000000, 485 0x00000000, 0x00000000, 0x00000000, 0x00000000, 486 0x00000000, 0x00000000, 0x00000000, 0x00000000, 487 0x00000000, 0x00000000, 0x00000000, 0x00000000, 488 0x00000000, 0x00000000, 0x00000000, 0x00000000, 489 }; 490 491 /* Packet types for PPPoE */ 492 static const u32 ice_ptypes_pppoe[] = { 493 0x00000000, 0x00000000, 0x00000000, 0x00000000, 494 0x00000000, 0x00000000, 0x00000000, 0x00000000, 495 0x00000000, 0x03ffe000, 0x00000000, 0x00000000, 496 0x00000000, 0x00000000, 0x00000000, 0x00000000, 497 0x00000000, 0x00000000, 0x00000000, 0x00000000, 498 0x00000000, 0x00000000, 0x00000000, 0x00000000, 499 0x00000000, 0x00000000, 0x00000000, 0x00000000, 500 0x00000000, 0x00000000, 0x00000000, 0x00000000, 501 }; 502 503 /* Packet types for packets with PFCP NODE header 
*/ 504 static const u32 ice_ptypes_pfcp_node[] = { 505 0x00000000, 0x00000000, 0x00000000, 0x00000000, 506 0x00000000, 0x00000000, 0x00000000, 0x00000000, 507 0x00000000, 0x00000000, 0x80000000, 0x00000002, 508 0x00000000, 0x00000000, 0x00000000, 0x00000000, 509 0x00000000, 0x00000000, 0x00000000, 0x00000000, 510 0x00000000, 0x00000000, 0x00000000, 0x00000000, 511 0x00000000, 0x00000000, 0x00000000, 0x00000000, 512 0x00000000, 0x00000000, 0x00000000, 0x00000000, 513 }; 514 515 /* Packet types for packets with PFCP SESSION header */ 516 static const u32 ice_ptypes_pfcp_session[] = { 517 0x00000000, 0x00000000, 0x00000000, 0x00000000, 518 0x00000000, 0x00000000, 0x00000000, 0x00000000, 519 0x00000000, 0x00000000, 0x00000000, 0x00000005, 520 0x00000000, 0x00000000, 0x00000000, 0x00000000, 521 0x00000000, 0x00000000, 0x00000000, 0x00000000, 522 0x00000000, 0x00000000, 0x00000000, 0x00000000, 523 0x00000000, 0x00000000, 0x00000000, 0x00000000, 524 0x00000000, 0x00000000, 0x00000000, 0x00000000, 525 }; 526 527 /* Packet types for L2TPv3 */ 528 static const u32 ice_ptypes_l2tpv3[] = { 529 0x00000000, 0x00000000, 0x00000000, 0x00000000, 530 0x00000000, 0x00000000, 0x00000000, 0x00000000, 531 0x00000000, 0x00000000, 0x00000000, 0x00000300, 532 0x00000000, 0x00000000, 0x00000000, 0x00000000, 533 0x00000000, 0x00000000, 0x00000000, 0x00000000, 534 0x00000000, 0x00000000, 0x00000000, 0x00000000, 535 0x00000000, 0x00000000, 0x00000000, 0x00000000, 536 0x00000000, 0x00000000, 0x00000000, 0x00000000, 537 }; 538 539 /* Packet types for ESP */ 540 static const u32 ice_ptypes_esp[] = { 541 0x00000000, 0x00000000, 0x00000000, 0x00000000, 542 0x00000000, 0x00000003, 0x00000000, 0x00000000, 543 0x00000000, 0x00000000, 0x00000000, 0x00000000, 544 0x00000000, 0x00000000, 0x00000000, 0x00000000, 545 0x00000000, 0x00000000, 0x00000000, 0x00000000, 546 0x00000000, 0x00000000, 0x00000000, 0x00000000, 547 0x00000000, 0x00000000, 0x00000000, 0x00000000, 548 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 549 }; 550 551 /* Packet types for AH */ 552 static const u32 ice_ptypes_ah[] = { 553 0x00000000, 0x00000000, 0x00000000, 0x00000000, 554 0x00000000, 0x0000000C, 0x00000000, 0x00000000, 555 0x00000000, 0x00000000, 0x00000000, 0x00000000, 556 0x00000000, 0x00000000, 0x00000000, 0x00000000, 557 0x00000000, 0x00000000, 0x00000000, 0x00000000, 558 0x00000000, 0x00000000, 0x00000000, 0x00000000, 559 0x00000000, 0x00000000, 0x00000000, 0x00000000, 560 0x00000000, 0x00000000, 0x00000000, 0x00000000, 561 }; 562 563 /* Packet types for packets with NAT_T ESP header */ 564 static const u32 ice_ptypes_nat_t_esp[] = { 565 0x00000000, 0x00000000, 0x00000000, 0x00000000, 566 0x00000000, 0x00000030, 0x00000000, 0x00000000, 567 0x00000000, 0x00000000, 0x00000000, 0x00000000, 568 0x00000000, 0x00000000, 0x00000000, 0x00000000, 569 0x00000000, 0x00000000, 0x00000000, 0x00000000, 570 0x00000000, 0x00000000, 0x00000000, 0x00000000, 571 0x00000000, 0x00000000, 0x00000000, 0x00000000, 572 0x00000000, 0x00000000, 0x00000000, 0x00000000, 573 }; 574 575 /* Manage parameters and info. used during the creation of a flow profile */ 576 struct ice_flow_prof_params { 577 enum ice_block blk; 578 u16 entry_length; /* # of bytes formatted entry will require */ 579 u8 es_cnt; 580 struct ice_flow_prof *prof; 581 582 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 583 * This will give us the direction flags. 
584 */ 585 struct ice_fv_word es[ICE_MAX_FV_WORDS]; 586 /* attributes can be used to add attributes to a particular PTYPE */ 587 const struct ice_ptype_attributes *attr; 588 u16 attr_cnt; 589 590 u16 mask[ICE_MAX_FV_WORDS]; 591 DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); 592 }; 593 594 #define ICE_FLOW_RSS_HDRS_INNER_MASK \ 595 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \ 596 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \ 597 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \ 598 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ 599 ICE_FLOW_SEG_HDR_NAT_T_ESP) 600 601 #define ICE_FLOW_SEG_HDRS_L2_MASK \ 602 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) 603 #define ICE_FLOW_SEG_HDRS_L3_MASK \ 604 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) 605 #define ICE_FLOW_SEG_HDRS_L4_MASK \ 606 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ 607 ICE_FLOW_SEG_HDR_SCTP) 608 /* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */ 609 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \ 610 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) 611 612 /** 613 * ice_flow_val_hdrs - validates packet segments for valid protocol headers 614 * @segs: array of one or more packet segments that describe the flow 615 * @segs_cnt: number of packet segments provided 616 */ 617 static enum ice_status 618 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) 619 { 620 u8 i; 621 622 for (i = 0; i < segs_cnt; i++) { 623 /* Multiple L3 headers */ 624 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && 625 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) 626 return ICE_ERR_PARAM; 627 628 /* Multiple L4 headers */ 629 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && 630 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) 631 return ICE_ERR_PARAM; 632 } 633 634 return 0; 635 } 636 637 /* Sizes of fixed known protocol headers without header options */ 638 #define 
ICE_FLOW_PROT_HDR_SZ_MAC 14 639 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) 640 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 641 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 642 #define ICE_FLOW_PROT_HDR_SZ_ARP 28 643 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8 644 #define ICE_FLOW_PROT_HDR_SZ_TCP 20 645 #define ICE_FLOW_PROT_HDR_SZ_UDP 8 646 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 647 648 /** 649 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers 650 * @params: information about the flow to be processed 651 * @seg: index of packet segment whose header size is to be determined 652 */ 653 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) 654 { 655 u16 sz; 656 657 /* L2 headers */ 658 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? 659 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; 660 661 /* L3 headers */ 662 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) 663 sz += ICE_FLOW_PROT_HDR_SZ_IPV4; 664 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) 665 sz += ICE_FLOW_PROT_HDR_SZ_IPV6; 666 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) 667 sz += ICE_FLOW_PROT_HDR_SZ_ARP; 668 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) 669 /* An L3 header is required if L4 is specified */ 670 return 0; 671 672 /* L4 headers */ 673 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) 674 sz += ICE_FLOW_PROT_HDR_SZ_ICMP; 675 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) 676 sz += ICE_FLOW_PROT_HDR_SZ_TCP; 677 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) 678 sz += ICE_FLOW_PROT_HDR_SZ_UDP; 679 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) 680 sz += ICE_FLOW_PROT_HDR_SZ_SCTP; 681 682 return sz; 683 } 684 685 /** 686 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments 687 * @params: information about the flow to be processed 688 * 689 * This function identifies the packet types 
associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 *
 * The ptypes bitmap starts out all-ones ("every packet type") and is narrowed
 * by intersecting it with the PTYPE list of each header present in every
 * segment; headers that must be absent (PPPoE, PFCP) are masked out instead.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Begin with all packet types allowed; each check below narrows it */
	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			/* segment 0 is the outermost header set; any later
			 * segment uses the inner-layer (_il) PTYPE tables
			 */
			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
				(const unsigned long *)ice_ptypes_mac_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const unsigned long *)ice_ptypes_macvlan_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_arp_of,
				   ICE_FLOW_PTYPE_MAX);
		}

		/* L3: the IPV_OTHER cases are checked first, then the
		 * "no L4" cases, then plain IPv4/IPv6 — order matters here
		 */
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
				(const unsigned long *)ice_ptypes_ipv4_ofos_all;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
				(const unsigned long *)ice_ptypes_ipv6_ofos_all;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else {
			/* PPPoE not requested: explicitly exclude its ptypes */
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
				(const unsigned long *)ice_ptypes_icmp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE narrowing only applies to the outer segment */
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const unsigned long *)ice_ptypes_gtpc;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const unsigned long *)ice_ptypes_gtpc_tid;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const unsigned long *)ice_ptypes_l2tpv3;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const unsigned long *)ice_ptypes_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const unsigned long *)ice_ptypes_ah;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const unsigned long *)ice_ptypes_nat_t_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src = (const unsigned long *)ice_ptypes_pfcp_node;
			else
				src = (const unsigned long *)ice_ptypes_pfcp_session;

			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else {
			/* PFCP not requested: exclude both node and session */
			src = (const unsigned long *)ice_ptypes_pfcp_node;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);

			src = (const unsigned long *)ice_ptypes_pfcp_session;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);
		}
	}

	return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bit field of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the field to the protocol ID it is extracted from. Outer
	 * (seg == 0) and inner segments generally use different protocol IDs.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
		prot_id = ICE_PROT_L2TPV3;
		break;
	case ICE_FLOW_FIELD_IDX_ESP_SPI:
		prot_id = ICE_PROT_ESP_F;
		break;
	case ICE_FLOW_FIELD_IDX_AH_SPI:
		prot_id = ICE_PROT_ESP_2;
		break;
	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}

/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment carries no raw match fields */
	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		u64 match = params->prof->segs[i].match;
		enum ice_flow_field j;

		/* Walk every matched field bit; each consumed bit is cleared
		 * from the local copy of the match mask
		 */
		for_each_set_bit(j, (unsigned long *)&match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i, j, match);
			if (status)
				return status;
			clear_bit(j, (unsigned long *)&match);
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	enum ice_status status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	/* Only the FD and RSS blocks are supported here */
	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		status = 0;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	return status;
}

/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to
check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * Returns the first matching profile, or NULL when none matches. The profile
 * list lock is taken internally for the duration of the walk.
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 *
 * NOTE(review): unlike ice_flow_find_prof_conds() this walk takes no lock;
 * presumably the caller holds hw->fl_profs_locks[blk] — confirm at call sites.
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *p;

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if (p->id == prof_id)
			return p;

	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	/* Free the entry's private buffer, if any, then the entry itself */
	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * Unlinks the entry from its profile's list and frees it. The caller is
 * expected to hold the profile's entries_lock.
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 *
 * On success ownership of the newly allocated profile passes to the caller
 * via @prof; on failure the partially built profile is freed here.
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	/* params is scratch space only; freed before returning */
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->attr, params->attr_cnt, params->es,
			      params->mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	mutex_init(&params->prof->entries_lock);
	*prof = params->prof;

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* prof is only unlinked and freed when the HW removal
		 * succeeded; otherwise it stays on the list
		 */
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 *
 * Idempotent: a VSI already marked in prof->vsis is not re-added.
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (!test_bit(vsi_handle, prof->vsis)) {
		status = ice_add_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			set_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 *
 * Idempotent: a VSI not marked in prof->vsis is left untouched.
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status =
ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	/* Only FD and RSS entries are supported */
	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* On any failure, free the (not yet listed) entry and its buffer */
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	if (prof) {
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}

/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 * entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 * input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 * entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the
mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	/* Record the field in the segment's match mask; range matches are
	 * additionally flagged in the range mask
	 */
	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;

	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	/* Mark the protocol header this field belongs to as present */
	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
}

/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 * entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 * input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 * entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile.
When adding a flow entry associated with the 1750 * flow profile, these locations will be used to quickly extract the values and 1751 * create the content of a match entry. This function should only be used for 1752 * fixed-size data structures. 1753 */ 1754 void 1755 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1756 u16 val_loc, u16 mask_loc, u16 last_loc, bool range) 1757 { 1758 enum ice_flow_fld_match_type t = range ? 1759 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; 1760 1761 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); 1762 } 1763 1764 /** 1765 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf 1766 * @seg: packet segment the field being set belongs to 1767 * @off: offset of the raw field from the beginning of the segment in bytes 1768 * @len: length of the raw pattern to be matched 1769 * @val_loc: location of the value to match from entry's input buffer 1770 * @mask_loc: location of mask value from entry's input buffer 1771 * 1772 * This function specifies the offset of the raw field to be match from the 1773 * beginning of the specified packet segment, and the locations, in the form of 1774 * byte offsets from the start of the input buffer for a flow entry, from where 1775 * the value to match and the mask value to be extracted. These locations are 1776 * then stored in the flow profile. When adding flow entries to the associated 1777 * flow profile, these locations can be used to quickly extract the values to 1778 * create the content of a match entry. This function should only be used for 1779 * fixed-size data structures. 
1780 */ 1781 void 1782 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 1783 u16 val_loc, u16 mask_loc) 1784 { 1785 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { 1786 seg->raws[seg->raws_cnt].off = off; 1787 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; 1788 seg->raws[seg->raws_cnt].info.src.val = val_loc; 1789 seg->raws[seg->raws_cnt].info.src.mask = mask_loc; 1790 /* The "last" field is used to store the length of the field */ 1791 seg->raws[seg->raws_cnt].info.src.last = len; 1792 } 1793 1794 /* Overflows of "raws" will be handled as an error condition later in 1795 * the flow when this information is processed. 1796 */ 1797 seg->raws_cnt++; 1798 } 1799 1800 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ 1801 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) 1802 1803 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ 1804 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) 1805 1806 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ 1807 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) 1808 1809 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ 1810 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ 1811 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ 1812 ICE_FLOW_RSS_SEG_HDR_L4_MASKS) 1813 1814 /** 1815 * ice_flow_set_rss_seg_info - setup packet segments for RSS 1816 * @segs: pointer to the flow field segment(s) 1817 * @hash_fields: fields to be hashed on for the segment(s) 1818 * @flow_hdr: protocol header fields within a packet segment 1819 * 1820 * Helper function to extract fields from hash bitmap and use flow 1821 * header value to set flow field segment for further use in flow 1822 * profile entry or removal. 
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
			  u32 flow_hdr)
{
	u64 val;
	u8 i;

	/* Mark every requested hash field in the segment. RSS needs no match
	 * locations, so all offsets are passed as ICE_FLOW_FLD_OFF_INVAL and
	 * no range matching is requested.
	 */
	for_each_set_bit(i, (unsigned long *)&hash_fields,
			 ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(segs, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(segs, flow_hdr);

	/* Only headers RSS understands (L2/L3/L4 plus inner/IPV_OTHER
	 * modifiers) may be present in the segment.
	 */
	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
		return ICE_ERR_PARAM;

	/* At most one L3 protocol header may be selected (power-of-2 check
	 * means a single bit set).
	 */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	/* Likewise, at most one L4 protocol header may be selected */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	return 0;
}

/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	/* Unlocked emptiness check is a fast path; the authoritative walk
	 * below is done under rss_locks.
	 */
	if (list_empty(&hw->rss_list_head))
		return;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		/* Drop this VSI from the config; once no VSI references the
		 * entry it is unlinked and freed (safe iterator allows this).
		 */
		if (test_and_clear_bit(vsi_handle, r->vsis))
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
	mutex_unlock(&hw->rss_locks);
}

/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Nothing to do if no RSS flow profiles exist */
	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			/* A profile with no remaining VSIs is unused; remove
			 * it (safe iterator permits deletion mid-walk).
			 */
			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 * Matching is done against the profile's outermost segment.
	 */
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			clear_bit(vsi_handle, r->vsis);
			/* Free the entry once no VSI references it */
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
			return;
		}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	/* If a matching configuration already exists, just tag this VSI */
	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			set_bit(vsi_handle, r->vsis);
			return 0;
		}

	/* Otherwise create a new list entry for this hash/header combo */
	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
			       GFP_KERNEL);
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	set_bit(vsi_handle, rss_cfg->vsis);

	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);

	return 0;
}

#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] -
 * Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))

/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields. Only the
	 * last (innermost) segment carries the hash configuration.
	 */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Configure both the outer and inner header segments under one
	 * acquisition of the RSS lock.
	 */
	mutex_lock(&hw->rss_locks);
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	mutex_unlock(&hw->rss_locks);

	return status;
}

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
 * convert its values to their appropriate flow L3, L4 values.
 */
/* IPv4 groupings of AVF hash bits */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
/* Union of every supported IPv4 AVF hash bit (incl. SCTP) */
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

/* IPv6 groupings of AVF hash bits */
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
/* Union of every supported IPv6 AVF hash bit (incl. SCTP) */
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))

/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * This function will take the hash bitmap provided by the AVF driver via a
 * message, convert it to ICE-compatible values, and configure RSS flow
 * profiles.
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit.
	 * Each iteration consumes one group of bits from hash_flds until the
	 * bitmap is exhausted; an unconsumable bit terminates with an error.
	 */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		/* No branch consumed a bit: the remaining bits are invalid */
		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Re-applies (outer then inner headers) every stored RSS configuration
 * that references the given VSI; stops at the first failure.
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = 0;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
		if (test_bit(vsi_handle, r->vsis)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of flow
 * profile having the given header
types and containing input VSI 2306 */ 2307 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) 2308 { 2309 u64 rss_hash = ICE_HASH_INVALID; 2310 struct ice_rss_cfg *r; 2311 2312 /* verify if the protocol header is non zero and VSI is valid */ 2313 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) 2314 return ICE_HASH_INVALID; 2315 2316 mutex_lock(&hw->rss_locks); 2317 list_for_each_entry(r, &hw->rss_list_head, l_entry) 2318 if (test_bit(vsi_handle, r->vsis) && 2319 r->packet_hdr == hdrs) { 2320 rss_hash = r->hashed_flds; 2321 break; 2322 } 2323 mutex_unlock(&hw->rss_locks); 2324 2325 return rss_hash; 2326 } 2327