// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
};

/* Initializer for an unmasked field; offset and size are given in bytes and
 * converted to bits to match struct ice_flow_field_info.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}

/* Table containing properties of supported protocol header fields.
 * Entries are indexed by enum ice_flow_field_idx and must remain in that
 * order (see the per-entry index comments).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
	/* TTL/PROT entries use ICE_FLOW_SEG_HDR_NONE; NOTE(review): presumably
	 * the actual IPv4/IPv6 header is resolved at extraction time — confirm
	 * against the extraction code.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
			      0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
	/* PPPoE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
	/* L2TPv3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table is a bitmap over hardware packet type (ptype) numbers: bit N
 * corresponds to ptype N. The tables are consumed as "unsigned long *"
 * bitmaps of ICE_FLOW_PTYPE_MAX bits via bitmap_and()/bitmap_andnot() in
 * ice_flow_proc_seg_hdrs().
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};

/* Packet types for GTP-U encapsulated packets */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for PPPoE */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for L2TPv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ESP */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for AH */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;		/* HW block the profile belongs to */
	u16 entry_length;	/* # of bytes formatted entry will require */
	u8 es_cnt;		/* number of extraction-sequence words used */
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;

	u16 mask[ICE_MAX_FV_WORDS];
	/* set of packet types (ptypes) this profile applies to */
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};

#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP)

#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 *
 * Return: 0 when every segment specifies at most one L3 and at most one L4
 * header, ICE_ERR_PARAM otherwise (is_power_of_2() of the masked bits is the
 * "exactly one bit set" test).
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
	}

	return 0;
}

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 *
 * Return: sum of the fixed (option-less) header sizes for the L2, L3, and L4
 * headers selected in the segment, or 0 when an L4 header is requested
 * without any L3 header.
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz;

	/* L2 headers: a MAC header is always counted; add 2 bytes for VLAN */
	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers (mutually exclusive, see ice_flow_val_hdrs()) */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
		/* An L3 header is required if L4 is specified */
		return 0;

	/* L4 headers (mutually exclusive as well) */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

	return sz;
}

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Start with all ptypes relevant; each header requested by a segment
	 * ANDs the set down to the ptypes matching that header.
	 */
	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			/* segment 0 is the outer header set; later segments
			 * use the innermost/last tables
			 */
			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
				(const unsigned long *)ice_ptypes_mac_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const unsigned long *)ice_ptypes_macvlan_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_arp_of,
				   ICE_FLOW_PTYPE_MAX);
		}
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else {
			/* strip PPPoE ptypes when PPPoE was not requested */
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
				(const unsigned long *)ice_ptypes_icmp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE is only constrained on the outer segment */
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const unsigned long *)ice_ptypes_gtpc;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const unsigned long *)ice_ptypes_gtpc_tid;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const unsigned long *)ice_ptypes_l2tpv3;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const unsigned long *)ice_ptypes_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const unsigned long *)ice_ptypes_ah;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const unsigned long *)ice_ptypes_nat_t_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src = (const unsigned long *)ice_ptypes_pfcp_node;
			else
				src = (const unsigned long *)ice_ptypes_pfcp_session;

			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else {
			/* strip all PFCP ptypes when PFCP was not requested */
			src = (const unsigned long *)ice_ptypes_pfcp_node;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);

			src = (const unsigned long *)ice_ptypes_pfcp_session;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);
		}
	}

	return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bit field of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
841 */ 842 static enum ice_status 843 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, 844 u8 seg, enum ice_flow_field fld, u64 match) 845 { 846 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; 847 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; 848 u8 fv_words = hw->blk[params->blk].es.fvw; 849 struct ice_flow_fld_info *flds; 850 u16 cnt, ese_bits, i; 851 u16 sib_mask = 0; 852 u16 mask; 853 u16 off; 854 855 flds = params->prof->segs[seg].fields; 856 857 switch (fld) { 858 case ICE_FLOW_FIELD_IDX_ETH_DA: 859 case ICE_FLOW_FIELD_IDX_ETH_SA: 860 case ICE_FLOW_FIELD_IDX_S_VLAN: 861 case ICE_FLOW_FIELD_IDX_C_VLAN: 862 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; 863 break; 864 case ICE_FLOW_FIELD_IDX_ETH_TYPE: 865 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; 866 break; 867 case ICE_FLOW_FIELD_IDX_IPV4_DSCP: 868 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 869 break; 870 case ICE_FLOW_FIELD_IDX_IPV6_DSCP: 871 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 872 break; 873 case ICE_FLOW_FIELD_IDX_IPV4_TTL: 874 case ICE_FLOW_FIELD_IDX_IPV4_PROT: 875 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 876 877 /* TTL and PROT share the same extraction seq. entry. 878 * Each is considered a sibling to the other in terms of sharing 879 * the same extraction sequence entry. 880 */ 881 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) 882 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; 883 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT) 884 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; 885 886 /* If the sibling field is also included, that field's 887 * mask needs to be included. 888 */ 889 if (match & BIT(sib)) 890 sib_mask = ice_flds_info[sib].mask; 891 break; 892 case ICE_FLOW_FIELD_IDX_IPV6_TTL: 893 case ICE_FLOW_FIELD_IDX_IPV6_PROT: 894 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 895 896 /* TTL and PROT share the same extraction seq. entry. 
897 * Each is considered a sibling to the other in terms of sharing 898 * the same extraction sequence entry. 899 */ 900 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) 901 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; 902 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT) 903 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; 904 905 /* If the sibling field is also included, that field's 906 * mask needs to be included. 907 */ 908 if (match & BIT(sib)) 909 sib_mask = ice_flds_info[sib].mask; 910 break; 911 case ICE_FLOW_FIELD_IDX_IPV4_SA: 912 case ICE_FLOW_FIELD_IDX_IPV4_DA: 913 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 914 break; 915 case ICE_FLOW_FIELD_IDX_IPV6_SA: 916 case ICE_FLOW_FIELD_IDX_IPV6_DA: 917 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 918 break; 919 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: 920 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: 921 case ICE_FLOW_FIELD_IDX_TCP_FLAGS: 922 prot_id = ICE_PROT_TCP_IL; 923 break; 924 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: 925 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: 926 prot_id = ICE_PROT_UDP_IL_OR_S; 927 break; 928 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: 929 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: 930 prot_id = ICE_PROT_SCTP_IL; 931 break; 932 case ICE_FLOW_FIELD_IDX_GTPC_TEID: 933 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID: 934 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID: 935 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID: 936 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID: 937 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI: 938 /* GTP is accessed through UDP OF protocol */ 939 prot_id = ICE_PROT_UDP_OF; 940 break; 941 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID: 942 prot_id = ICE_PROT_PPPOE; 943 break; 944 case ICE_FLOW_FIELD_IDX_PFCP_SEID: 945 prot_id = ICE_PROT_UDP_IL_OR_S; 946 break; 947 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID: 948 prot_id = ICE_PROT_L2TPV3; 949 break; 950 case ICE_FLOW_FIELD_IDX_ESP_SPI: 951 prot_id = ICE_PROT_ESP_F; 952 break; 953 case ICE_FLOW_FIELD_IDX_AH_SPI: 954 prot_id = ICE_PROT_ESP_2; 955 break; 956 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI: 957 
prot_id = ICE_PROT_UDP_IL_OR_S; 958 break; 959 case ICE_FLOW_FIELD_IDX_ARP_SIP: 960 case ICE_FLOW_FIELD_IDX_ARP_DIP: 961 case ICE_FLOW_FIELD_IDX_ARP_SHA: 962 case ICE_FLOW_FIELD_IDX_ARP_DHA: 963 case ICE_FLOW_FIELD_IDX_ARP_OP: 964 prot_id = ICE_PROT_ARP_OF; 965 break; 966 case ICE_FLOW_FIELD_IDX_ICMP_TYPE: 967 case ICE_FLOW_FIELD_IDX_ICMP_CODE: 968 /* ICMP type and code share the same extraction seq. entry */ 969 prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ? 970 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; 971 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? 972 ICE_FLOW_FIELD_IDX_ICMP_CODE : 973 ICE_FLOW_FIELD_IDX_ICMP_TYPE; 974 break; 975 case ICE_FLOW_FIELD_IDX_GRE_KEYID: 976 prot_id = ICE_PROT_GRE_OF; 977 break; 978 default: 979 return ICE_ERR_NOT_IMPL; 980 } 981 982 /* Each extraction sequence entry is a word in size, and extracts a 983 * word-aligned offset from a protocol header. 984 */ 985 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; 986 987 flds[fld].xtrct.prot_id = prot_id; 988 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * 989 ICE_FLOW_FV_EXTRACT_SZ; 990 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); 991 flds[fld].xtrct.idx = params->es_cnt; 992 flds[fld].xtrct.mask = ice_flds_info[fld].mask; 993 994 /* Adjust the next field-entry index after accommodating the number of 995 * entries this field consumes 996 */ 997 cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size, 998 ese_bits); 999 1000 /* Fill in the extraction sequence entries needed for this field */ 1001 off = flds[fld].xtrct.off; 1002 mask = flds[fld].xtrct.mask; 1003 for (i = 0; i < cnt; i++) { 1004 /* Only consume an extraction sequence entry if there is no 1005 * sibling field associated with this field or the sibling entry 1006 * already extracts the word shared with this field. 
1007 */ 1008 if (sib == ICE_FLOW_FIELD_IDX_MAX || 1009 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || 1010 flds[sib].xtrct.off != off) { 1011 u8 idx; 1012 1013 /* Make sure the number of extraction sequence required 1014 * does not exceed the block's capability 1015 */ 1016 if (params->es_cnt >= fv_words) 1017 return ICE_ERR_MAX_LIMIT; 1018 1019 /* some blocks require a reversed field vector layout */ 1020 if (hw->blk[params->blk].es.reverse) 1021 idx = fv_words - params->es_cnt - 1; 1022 else 1023 idx = params->es_cnt; 1024 1025 params->es[idx].prot_id = prot_id; 1026 params->es[idx].off = off; 1027 params->mask[idx] = mask | sib_mask; 1028 params->es_cnt++; 1029 } 1030 1031 off += ICE_FLOW_FV_EXTRACT_SZ; 1032 } 1033 1034 return 0; 1035 } 1036 1037 /** 1038 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes 1039 * @hw: pointer to the HW struct 1040 * @params: information about the flow to be processed 1041 * @seg: index of packet segment whose raw fields are to be extracted 1042 */ 1043 static enum ice_status 1044 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, 1045 u8 seg) 1046 { 1047 u16 fv_words; 1048 u16 hdrs_sz; 1049 u8 i; 1050 1051 if (!params->prof->segs[seg].raws_cnt) 1052 return 0; 1053 1054 if (params->prof->segs[seg].raws_cnt > 1055 ARRAY_SIZE(params->prof->segs[seg].raws)) 1056 return ICE_ERR_MAX_LIMIT; 1057 1058 /* Offsets within the segment headers are not supported */ 1059 hdrs_sz = ice_flow_calc_seg_sz(params, seg); 1060 if (!hdrs_sz) 1061 return ICE_ERR_PARAM; 1062 1063 fv_words = hw->blk[params->blk].es.fvw; 1064 1065 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) { 1066 struct ice_flow_seg_fld_raw *raw; 1067 u16 off, cnt, j; 1068 1069 raw = ¶ms->prof->segs[seg].raws[i]; 1070 1071 /* Storing extraction information */ 1072 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S; 1073 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) * 1074 ICE_FLOW_FV_EXTRACT_SZ; 1075 raw->info.xtrct.disp = 
(raw->off % ICE_FLOW_FV_EXTRACT_SZ) * 1076 BITS_PER_BYTE; 1077 raw->info.xtrct.idx = params->es_cnt; 1078 1079 /* Determine the number of field vector entries this raw field 1080 * consumes. 1081 */ 1082 cnt = DIV_ROUND_UP(raw->info.xtrct.disp + 1083 (raw->info.src.last * BITS_PER_BYTE), 1084 (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE)); 1085 off = raw->info.xtrct.off; 1086 for (j = 0; j < cnt; j++) { 1087 u16 idx; 1088 1089 /* Make sure the number of extraction sequence required 1090 * does not exceed the block's capability 1091 */ 1092 if (params->es_cnt >= hw->blk[params->blk].es.count || 1093 params->es_cnt >= ICE_MAX_FV_WORDS) 1094 return ICE_ERR_MAX_LIMIT; 1095 1096 /* some blocks require a reversed field vector layout */ 1097 if (hw->blk[params->blk].es.reverse) 1098 idx = fv_words - params->es_cnt - 1; 1099 else 1100 idx = params->es_cnt; 1101 1102 params->es[idx].prot_id = raw->info.xtrct.prot_id; 1103 params->es[idx].off = off; 1104 params->es_cnt++; 1105 off += ICE_FLOW_FV_EXTRACT_SZ; 1106 } 1107 } 1108 1109 return 0; 1110 } 1111 1112 /** 1113 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments 1114 * @hw: pointer to the HW struct 1115 * @params: information about the flow to be processed 1116 * 1117 * This function iterates through all matched fields in the given segments, and 1118 * creates an extraction sequence for the fields. 
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		/* Local copy so bits can be cleared as they are processed */
		u64 match = params->prof->segs[i].match;
		enum ice_flow_field j;

		/* NOTE(review): casting &match (u64) to unsigned long * for
		 * the bitmap helpers assumes a 64-bit target — confirm if
		 * 32-bit support is ever required.
		 */
		for_each_set_bit(j, (unsigned long *)&match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i, j, match);
			if (status)
				return status;
			clear_bit(j, (unsigned long *)&match);
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	enum ice_status status;

	/* Narrow the packet-type bitmap from the segment headers first */
	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		/* Only the flow director and RSS blocks are supported */
		status = 0;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	return status;
}

/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to
 *		check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 *
 * The callers visible in this file hold hw->fl_profs_locks[blk] around this
 * lookup.
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *p;

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if (p->id == prof_id)
			return p;

	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	/* Free the per-entry data buffer, then the entry itself */
	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * The callers visible in this file hold prof->entries_lock around this call.
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	/* Unlink from the profile's entry list before freeing */
	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	/* params is heap-allocated; freed before return (free_params) */
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->attr, params->attr_cnt, params->es,
			      params->mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	mutex_init(&params->prof->entries_lock);
	*prof = params->prof;

out:
	/* On failure release the profile; on success ownership moves to
	 * *prof and the devm-managed allocation is kept.
	 */
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* Only unlink and free the profile if HW removal succeeded */
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	/* No-op if the VSI is already associated with this profile */
	if (!test_bit(vsi_handle, prof->vsis)) {
		status = ice_add_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			set_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	/* No-op if the VSI is not associated with this profile */
	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	/* Validate the segment headers before taking the profile lock */
	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* Entry was allocated but never linked into the profile list */
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	if (prof) {
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}

/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the
 * mask, and the upper-bound value in the start of the input buffer for a flow
 * entry. This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	/* Record the field as matched; range matches are tracked separately */
	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;

	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	/* Infer the protocol header(s) implied by this field */
	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
}

/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile.
When adding a flow entry associated with the 1706 * flow profile, these locations will be used to quickly extract the values and 1707 * create the content of a match entry. This function should only be used for 1708 * fixed-size data structures. 1709 */ 1710 void 1711 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1712 u16 val_loc, u16 mask_loc, u16 last_loc, bool range) 1713 { 1714 enum ice_flow_fld_match_type t = range ? 1715 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; 1716 1717 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); 1718 } 1719 1720 /** 1721 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf 1722 * @seg: packet segment the field being set belongs to 1723 * @off: offset of the raw field from the beginning of the segment in bytes 1724 * @len: length of the raw pattern to be matched 1725 * @val_loc: location of the value to match from entry's input buffer 1726 * @mask_loc: location of mask value from entry's input buffer 1727 * 1728 * This function specifies the offset of the raw field to be match from the 1729 * beginning of the specified packet segment, and the locations, in the form of 1730 * byte offsets from the start of the input buffer for a flow entry, from where 1731 * the value to match and the mask value to be extracted. These locations are 1732 * then stored in the flow profile. When adding flow entries to the associated 1733 * flow profile, these locations can be used to quickly extract the values to 1734 * create the content of a match entry. This function should only be used for 1735 * fixed-size data structures. 
1736 */ 1737 void 1738 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 1739 u16 val_loc, u16 mask_loc) 1740 { 1741 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { 1742 seg->raws[seg->raws_cnt].off = off; 1743 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; 1744 seg->raws[seg->raws_cnt].info.src.val = val_loc; 1745 seg->raws[seg->raws_cnt].info.src.mask = mask_loc; 1746 /* The "last" field is used to store the length of the field */ 1747 seg->raws[seg->raws_cnt].info.src.last = len; 1748 } 1749 1750 /* Overflows of "raws" will be handled as an error condition later in 1751 * the flow when this information is processed. 1752 */ 1753 seg->raws_cnt++; 1754 } 1755 1756 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ 1757 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) 1758 1759 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ 1760 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) 1761 1762 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ 1763 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) 1764 1765 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ 1766 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ 1767 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ 1768 ICE_FLOW_RSS_SEG_HDR_L4_MASKS) 1769 1770 /** 1771 * ice_flow_set_rss_seg_info - setup packet segments for RSS 1772 * @segs: pointer to the flow field segment(s) 1773 * @hash_fields: fields to be hashed on for the segment(s) 1774 * @flow_hdr: protocol header fields within a packet segment 1775 * 1776 * Helper function to extract fields from hash bitmap and use flow 1777 * header value to set flow field segment for further use in flow 1778 * profile entry or removal. 
1779 */ 1780 static enum ice_status 1781 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, 1782 u32 flow_hdr) 1783 { 1784 u64 val; 1785 u8 i; 1786 1787 for_each_set_bit(i, (unsigned long *)&hash_fields, 1788 ICE_FLOW_FIELD_IDX_MAX) 1789 ice_flow_set_fld(segs, (enum ice_flow_field)i, 1790 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 1791 ICE_FLOW_FLD_OFF_INVAL, false); 1792 1793 ICE_FLOW_SET_HDRS(segs, flow_hdr); 1794 1795 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & 1796 ~ICE_FLOW_RSS_HDRS_INNER_MASK) 1797 return ICE_ERR_PARAM; 1798 1799 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); 1800 if (val && !is_power_of_2(val)) 1801 return ICE_ERR_CFG; 1802 1803 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); 1804 if (val && !is_power_of_2(val)) 1805 return ICE_ERR_CFG; 1806 1807 return 0; 1808 } 1809 1810 /** 1811 * ice_rem_vsi_rss_list - remove VSI from RSS list 1812 * @hw: pointer to the hardware structure 1813 * @vsi_handle: software VSI handle 1814 * 1815 * Remove the VSI from all RSS configurations in the list. 1816 */ 1817 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) 1818 { 1819 struct ice_rss_cfg *r, *tmp; 1820 1821 if (list_empty(&hw->rss_list_head)) 1822 return; 1823 1824 mutex_lock(&hw->rss_locks); 1825 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) 1826 if (test_and_clear_bit(vsi_handle, r->vsis)) 1827 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) { 1828 list_del(&r->l_entry); 1829 devm_kfree(ice_hw_to_dev(hw), r); 1830 } 1831 mutex_unlock(&hw->rss_locks); 1832 } 1833 1834 /** 1835 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI 1836 * @hw: pointer to the hardware structure 1837 * @vsi_handle: software VSI handle 1838 * 1839 * This function will iterate through all flow profiles and disassociate 1840 * the VSI from that profile. If the flow profile has no VSIs it will 1841 * be removed. 
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Unlocked emptiness check is a best-effort fast path only */
	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			/* Remove the profile once no VSI references it */
			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 * Only the profile's last segment is compared - it carries the
	 * hash configuration for the (possibly inner) headers.
	 */
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			clear_bit(vsi_handle, r->vsis);
			/* Free the entry once no VSI references it */
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
			return;
		}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	/* Reuse an existing entry with the same hash/header configuration */
	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			set_bit(vsi_handle, r->vsis);
			return 0;
		}

	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
			       GFP_KERNEL);
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	set_bit(vsi_handle, rss_cfg->vsis);

	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);

	return 0;
}

/* Bit layout of the 64-bit flow profile ID built by ICE_FLOW_GEN_PROFID */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts used when programming outer vs. inner (tunneled) headers */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] -
 *        Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))

/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	/* Program outer headers first, then inner (tunneled) headers */
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	mutex_unlock(&hw->rss_locks);

	return status;
}

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
 * convert its values to their appropriate flow L3, L4 values.
2096 */ 2097 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \ 2098 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ 2099 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4)) 2100 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \ 2101 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ 2102 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP)) 2103 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \ 2104 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ 2105 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ 2106 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP)) 2107 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \ 2108 (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \ 2109 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) 2110 2111 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \ 2112 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ 2113 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6)) 2114 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \ 2115 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ 2116 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \ 2117 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP)) 2118 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \ 2119 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ 2120 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP)) 2121 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \ 2122 (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \ 2123 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) 2124 2125 /** 2126 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver 2127 * @hw: pointer to the hardware structure 2128 * @vsi_handle: software VSI handle 2129 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure 2130 * 2131 * This function will take the hash bitmap provided by the AVF driver via a 2132 * message, convert it to ICE-compatible values, and configure RSS flow 2133 * profiles. 
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit.
	 * Each iteration consumes one group of related bits from hash_flds
	 * (L3-only, then TCP, UDP, SCTP per IP family) until none remain.
	 */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		/* A set bit that maps to no configuration is invalid input */
		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = 0;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
		if (test_bit(vsi_handle, r->vsis)) {
			/* Re-program both outer and inner header configs */
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of flow
 * profile having the given header
types and containing input VSI 2262 */ 2263 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) 2264 { 2265 u64 rss_hash = ICE_HASH_INVALID; 2266 struct ice_rss_cfg *r; 2267 2268 /* verify if the protocol header is non zero and VSI is valid */ 2269 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) 2270 return ICE_HASH_INVALID; 2271 2272 mutex_lock(&hw->rss_locks); 2273 list_for_each_entry(r, &hw->rss_list_head, l_entry) 2274 if (test_bit(vsi_handle, r->vsis) && 2275 r->packet_hdr == hdrs) { 2276 rss_hash = r->hashed_flds; 2277 break; 2278 } 2279 mutex_unlock(&hw->rss_locks); 2280 2281 return rss_hash; 2282 } 2283